Changeset 184 for trunk/modules/vci_mem_cache_v4/caba/source
- Timestamp: Jan 7, 2012, 7:17:34 PM
- Location: trunk/modules/vci_mem_cache_v4/caba/source
- Files: 4 edited
trunk/modules/vci_mem_cache_v4/caba/source/include/update_tab_v4.h
r2 → r184

Lines 47-58 (the copy() helper): indentation-only realignment of the member assignments:

    valid   = i_valid;
    update  = i_update;
    brdcast = i_brdcast;
    rsp     = i_rsp;
    srcid   = i_srcid;
    trdid   = i_trdid;
    pktid   = i_pktid;
    nline   = i_nline;
    count   = i_count;

Line 158: the comment above print() is corrected:

    - // The size() function returns the size of the tab
    + // The print() function displays the tab content

Lines 264-276: a new is_not_empty() predicate is added just before the need_rsp() function:

    /////////////////////////////////////////////////////////////////////
    // The is_not_empty() function returns true if the table is not empty
    /////////////////////////////////////////////////////////////////////
    bool is_not_empty()
    {
      for(size_t i = 0 ; i < size_tab ; i++){
        if(tab[i].valid){
          return true;
        }
      }
      return false;
    }
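The new predicate gives the controller FSMs a cheap way to test whether update/inval transactions are still pending before releasing a shared resource. A minimal usage sketch (the polling caller is hypothetical, not part of the changeset; only is_not_empty() itself is):

    // Hypothetical caller: check for pending coherence transactions.
    UpdateTab upt( 8 );            // 8-entry table, sized like m_update_tab
    // ... entries are allocated by the WRITE / SC FSMs, and decremented
    // by INIT_RSP as L1 acknowledgements come back ...
    if ( upt.is_not_empty() )
    {
        // at least one tab[i].valid is true: acknowledgements are still
        // expected, so the UPT must stay allocated and the caller retries
    }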
trunk/modules/vci_mem_cache_v4/caba/source/include/vci_mem_cache_v4.h
r177 r184

FSM state enums: the "LLSC" naming is replaced by "SC" everywhere, and several states are renamed or removed:

    - TGT_CMD_READ_EOP is removed from tgt_cmd_fsm_state_e (read requests are
      now single-flit, so no end-of-packet state is needed).
    - TGT_RSP_LLSC_IDLE / TGT_RSP_LLSC become TGT_RSP_SC_IDLE / TGT_RSP_SC.
    - READ_XRAM_REQ becomes READ_TRT_REQ.
    - IXR_CMD_LLSC_IDLE / IXR_CMD_LLSC_NLINE become IXR_CMD_SC_IDLE /
      IXR_CMD_SC_NLINE.
    - enum llsc_fsm_state_e becomes sc_fsm_state_e; within it, LLSC_IDLE ->
      SC_IDLE, SC_UPDATE -> SC_UPT_NEXT, SC_TRT_LOCK -> SC_TRT_PUT_LOCK,
      SC_XRAM_SEND -> SC_TRT_PUT_REQ, SC_RSP_FALSE -> SC_RSP_FAIL,
      SC_RSP_TRUE -> SC_RSP_SUCCESS, and LLSC_TRT_LOCK / LLSC_TRT_SET /
      LLSC_XRAM_REQ -> SC_TRT_GET_LOCK / SC_TRT_GET_SET / SC_TRT_GET_REQ.
    - ALLOC_DIR_LLSC, ALLOC_TRT_LLSC, ALLOC_UPT_LLSC and ALLOC_HEAP_LLSC
      become ALLOC_DIR_SC, ALLOC_TRT_SC, ALLOC_UPT_SC and ALLOC_HEAP_SC.

New debug members, one flag per FSM, inserted before the counters (which gain an "// instrumentation counters" banner):

    // debug variables (for each FSM)
    size_t m_debug_start_cycle;
    bool   m_debug_ok;
    bool   m_debug_global;
    bool   m_debug_tgt_cmd_fsm;
    bool   m_debug_tgt_rsp_fsm;
    bool   m_debug_init_cmd_fsm;
    bool   m_debug_init_rsp_fsm;
    bool   m_debug_read_fsm;
    bool   m_debug_write_fsm;
    bool   m_debug_sc_fsm;
    bool   m_debug_cleanup_fsm;
    bool   m_debug_ixr_cmd_fsm;
    bool   m_debug_ixr_rsp_fsm;
    bool   m_debug_xram_rsp_fsm;
    bool   m_debug_previous_hit;
    size_t m_debug_previous_count;

and, after the counters, a new member:

    size_t m_prev_count;

Constructor: the parameter list is re-aligned and gains two debug arguments:

    VciMemCacheV4(
        sc_module_name name,                                 // Instance Name
        const soclib::common::MappingTable &mtp,             // Mapping table for primary requests
        const soclib::common::MappingTable &mtc,             // Mapping table for coherence requests
        const soclib::common::MappingTable &mtx,             // Mapping table for XRAM
        const soclib::common::IntTab &vci_ixr_index,         // VCI port to XRAM (initiator)
        const soclib::common::IntTab &vci_ini_index,         // VCI port to PROC (initiator)
        const soclib::common::IntTab &vci_tgt_index,         // VCI port to PROC (target)
        const soclib::common::IntTab &vci_tgt_index_cleanup, // VCI port to PROC (target) for cleanup
        size_t nways,                                        // Number of ways per set
        size_t nsets,                                        // Number of sets
        size_t nwords,                                       // Number of words per line
        size_t heap_size=1024,                               // Size of the heap
        size_t transaction_tab_lines=TRANSACTION_TAB_LINES,  // Size of the TRT
        size_t update_tab_lines=UPDATE_TAB_LINES,            // Size of the UPT
        size_t debug_start_cycle=0,
        bool debug_ok=false);

The blank lines between the transition() / genMoore() / print_stats() / print_trace() declarations are removed, and a new monitoring method is declared:

    void cache_monitor(vci_addr_t addr);

Other member changes:

    - HeapDirectory m_heap_directory ("heap directory") is renamed m_heap
      ("heap for copies"); the neighbouring member comments are re-aligned.
    - A new sc_signal<size_t> xxx_count is added next to r_copies_limit.
    - The FIFOs between the TGT_CMD FSM and the atomic-access FSM
      (m_cmd_llsc_addr_fifo, eop, srcid, trdid, pktid, wdata) become
      m_cmd_sc_*_fifo.
    - All registers controlled by the LLSC FSM (r_llsc_fsm, r_llsc_wdata,
      r_llsc_rdata, ..., r_llsc_to_init_rsp_upt_index) are renamed r_sc_*,
      including the buffers toward the IXR_CMD, TGT_RSP, INIT_CMD and
      INIT_RSP FSMs, and the m_llsc_to_init_cmd_* fifos become
      m_sc_to_init_cmd_*.
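The two new constructor arguments feed the run-time half of the debug gating defined in vci_mem_cache_v4.cpp. A hedged instantiation sketch; the mapping-table names, IntTab values and geometry below are illustrative, not taken from the changeset:

    // Hypothetical top-cell excerpt: the last two arguments enable the
    // detailed FSM traces only after cycle 1000000.
    soclib::caba::VciMemCacheV4<vci_param> memc("memc",
        maptab_p, maptab_c, maptab_x,      // primary / coherence / XRAM maps
        soclib::common::IntTab(0),         // vci_ixr_index
        soclib::common::IntTab(0),         // vci_ini_index
        soclib::common::IntTab(0),         // vci_tgt_index
        soclib::common::IntTab(1),         // vci_tgt_index_cleanup
        16, 256, 16,                       // nways, nsets, nwords
        1024,                              // heap_size
        TRANSACTION_TAB_LINES,
        UPDATE_TAB_LINES,
        1000000,                           // debug_start_cycle
        true);                             // debug_ok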
trunk/modules/vci_mem_cache_v4/caba/source/include/xram_transaction_v4.h
r138 → r184

Lines 16-33: the typedefs and the TransactionTabEntry member declarations are re-aligned (whitespace only); the member comments are unchanged.

init(): the error flag is now reset together with the valid bit:

    void init()
    {
      valid  = false;
      rerror = false;
    }

alloc(): the byte-enable buffer is initialized with 0 instead of false (wdata_be holds be_t values, not booleans):

    for(size_t i=0; i<n_words; i++)
    {
      wdata_be.push_back(0);
      wdata.push_back(0);
    }

set() (lines 352-358): brace style only; the copy loop writing tab[index].wdata_be[i] and tab[index].wdata[i] is unchanged.

write_rsp(): the comment is corrected. The old text claimed "The data is only written when the corresponding BE field is Ox0"; it now reads "The BE field in TRT is taken into account", and a new argument is documented:

    // - error : invalid data

erase(): the rerror flag is also cleared when an entry is released:

    tab[index].valid  = false;
    tab[index].rerror = false;
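The corrected write_rsp() comment implies a byte-granularity merge: bytes already marked in wdata_be (written by the processor while the miss was pending) must not be overwritten by the XRAM response. A plausible sketch of that merge, assuming 4-byte words; the function body itself is not part of the hunks shown:

    // Hypothetical merge: keep processor bytes, fill the rest from XRAM.
    void write_rsp_sketch(TransactionTabEntry &entry,
                          const size_t word, const uint32_t xram_data)
    {
        uint32_t mask = 0;
        for (size_t b = 0; b < 4; b++)            // one BE bit per byte
            if (entry.wdata_be[word] & (1u << b)) mask |= (0xFFu << (8*b));
        entry.wdata[word] = (entry.wdata[word] & mask)    // processor bytes
                          | (xram_data         & ~mask);  // XRAM bytes
    }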
trunk/modules/vci_mem_cache_v4/caba/source/src/vci_mem_cache_v4.cpp
r175 → r184

Header: the file header comment is unchanged apart from whitespace.

Debug machinery (new lines 32-52): the old DEBUG_VCI_MEM_CACHE / DEBUG_START_CYCLE macros and the TDEBUG / IDEBUG / DDEBUG / LOCK_DEBUG / PRINTF machinery are removed and replaced by one compile-time flag per FSM, combined at run time with the m_debug_* variables set from the new constructor arguments:

    ////// debug services ///////////////////////////////////////////////////////
    // All debug messages are conditioned by two variables:
    // - compile time   : DEBUG_MEMC_***  : defined below
    // - execution time : m_debug_***     : defined by constructor arguments
    //   m_debug_* = (m_debug_ok) and (m_cpt_cycle > m_debug_start_cycle)
    /////////////////////////////////////////////////////////////////////////////

    #define DEBUG_MEMC_GLOBAL   0  // synthetic trace of all FSMs
    #define DEBUG_MEMC_READ     1  // detailed trace of READ FSM
    #define DEBUG_MEMC_WRITE    1  // detailed trace of WRITE FSM
    #define DEBUG_MEMC_SC       0  // detailed trace of SC FSM
    #define DEBUG_MEMC_IXR_CMD  1  // detailed trace of IXR_CMD FSM
    #define DEBUG_MEMC_IXR_RSP  1  // detailed trace of IXR_RSP FSM
    #define DEBUG_MEMC_XRAM_RSP 1  // detailed trace of XRAM_RSP FSM
    #define DEBUG_MEMC_INIT_CMD 0  // detailed trace of INIT_CMD FSM
    #define DEBUG_MEMC_INIT_RSP 0  // detailed trace of INIT_RSP FSM
    #define DEBUG_MEMC_TGT_CMD  0  // detailed trace of TGT_CMD FSM
    #define DEBUG_MEMC_TGT_RSP  0  // detailed trace of TGT_RSP FSM
    #define DEBUG_MEMC_CLEANUP  0  // detailed trace of CLEANUP FSM

    #define RANDOMIZE_SC 1

FSM state-name arrays: the trailing padding inside the string literals is removed, "TGT_CMD_READ_EOP" disappears from tgt_cmd_fsm_str[], llsc_fsm_str[] becomes sc_fsm_str[] with the renamed states (SC_IDLE, SC_UPT_NEXT, SC_TRT_PUT_LOCK, SC_TRT_PUT_REQ, SC_RSP_FAIL, SC_RSP_SUCCESS, SC_TRT_GET_LOCK, SC_TRT_GET_SET, SC_TRT_GET_REQ), read_fsm_str[] ends with "READ_TRT_REQ" instead of "READ_XRAM_REQ", and the IXR_CMD and ALLOC_* arrays follow the LLSC -> SC renaming of the header file. The other entries, including "INIT_CMD_SC_UPDT_DATA_HIGH", are unchanged.
"SC_INVAL", 180 "SC_TRT_PUT_REQ", 181 "SC_RSP_FAIL", 182 "SC_RSP_SUCCESS", 183 "SC_TRT_GET_LOCK", 184 "SC_TRT_GET_SET", 185 "SC_TRT_GET_REQ", 187 186 }; 188 187 const char *cleanup_fsm_str[] = { 189 "CLEANUP_IDLE 190 "CLEANUP_DIR_LOCK 191 "CLEANUP_DIR_WRITE 192 "CLEANUP_HEAP_LOCK 188 "CLEANUP_IDLE", 189 "CLEANUP_DIR_LOCK", 190 "CLEANUP_DIR_WRITE", 191 "CLEANUP_HEAP_LOCK", 193 192 "CLEANUP_HEAP_SEARCH", 194 "CLEANUP_HEAP_CLEAN 195 "CLEANUP_HEAP_FREE 196 "CLEANUP_UPT_LOCK 197 "CLEANUP_UPT_WRITE 198 "CLEANUP_WRITE_RSP 199 "CLEANUP_RSP 193 "CLEANUP_HEAP_CLEAN", 194 "CLEANUP_HEAP_FREE", 195 "CLEANUP_UPT_LOCK", 196 "CLEANUP_UPT_WRITE", 197 "CLEANUP_WRITE_RSP", 198 "CLEANUP_RSP", 200 199 }; 201 200 const char *alloc_dir_fsm_str[] = { 202 "ALLOC_DIR_READ 203 "ALLOC_DIR_WRITE 204 "ALLOC_DIR_ LLSC",205 "ALLOC_DIR_CLEANUP 201 "ALLOC_DIR_READ", 202 "ALLOC_DIR_WRITE", 203 "ALLOC_DIR_SC", 204 "ALLOC_DIR_CLEANUP", 206 205 "ALLOC_DIR_XRAM_RSP", 207 206 }; 208 207 const char *alloc_trt_fsm_str[] = { 209 "ALLOC_TRT_READ 210 "ALLOC_TRT_WRITE 211 "ALLOC_TRT_ LLSC",208 "ALLOC_TRT_READ", 209 "ALLOC_TRT_WRITE", 210 "ALLOC_TRT_SC", 212 211 "ALLOC_TRT_XRAM_RSP", 213 "ALLOC_TRT_IXR_RSP 212 "ALLOC_TRT_IXR_RSP", 214 213 }; 215 214 const char *alloc_upt_fsm_str[] = { 216 "ALLOC_UPT_WRITE 215 "ALLOC_UPT_WRITE", 217 216 "ALLOC_UPT_XRAM_RSP", 218 217 "ALLOC_UPT_INIT_RSP", 219 "ALLOC_UPT_CLEANUP 218 "ALLOC_UPT_CLEANUP", 220 219 }; 221 220 const char *alloc_heap_fsm_str[] = { 222 "ALLOC_HEAP_READ 223 "ALLOC_HEAP_WRITE 224 "ALLOC_HEAP_ LLSC",225 "ALLOC_HEAP_CLEANUP 221 "ALLOC_HEAP_READ", 222 "ALLOC_HEAP_WRITE", 223 "ALLOC_HEAP_SC", 224 "ALLOC_HEAP_CLEANUP", 226 225 "ALLOC_HEAP_XRAM_RSP", 227 226 }; … … 244 243 const soclib::common::IntTab &vci_tgt_index, 245 244 const soclib::common::IntTab &vci_tgt_index_cleanup, 246 size_t nways, 247 size_t nsets, 248 size_t nwords, 249 size_t heap_size, 250 size_t transaction_tab_lines, 251 size_t update_tab_lines) 245 size_t nways, // number of ways per set 246 size_t nsets, // number of cache sets 247 size_t nwords, // number of words in cache line 248 size_t heap_size, // number of heap entries 249 size_t transaction_tab_lines, // number of TRT entries 250 size_t update_tab_lines, // number of UPT entries 251 size_t debug_start_cycle, 252 bool debug_ok) 252 253 253 254 : soclib::caba::BaseModule(name), 255 256 m_debug_start_cycle( debug_start_cycle), 257 m_debug_ok ( debug_ok ), 254 258 255 259 p_clk("clk"), … … 275 279 m_update_tab( update_tab_lines ), 276 280 m_cache_directory( nways, nsets, nwords, vci_param::N ), 277 m_heap_directory( m_heap_size ), 281 m_heap( m_heap_size ), 282 278 283 #define L2 soclib::common::uint32_log2 279 284 m_x( L2(m_words), 2), … … 298 303 m_cmd_write_be_fifo("m_cmd_write_be_fifo",8), 299 304 300 m_cmd_ llsc_addr_fifo("m_cmd_llsc_addr_fifo",4),301 m_cmd_ llsc_eop_fifo("m_cmd_llsc_eop_fifo",4),302 m_cmd_ llsc_srcid_fifo("m_cmd_llsc_srcid_fifo",4),303 m_cmd_ llsc_trdid_fifo("m_cmd_llsc_trdid_fifo",4),304 m_cmd_ llsc_pktid_fifo("m_cmd_llsc_pktid_fifo",4),305 m_cmd_ llsc_wdata_fifo("m_cmd_llsc_wdata_fifo",4),305 m_cmd_sc_addr_fifo("m_cmd_sc_addr_fifo",4), 306 m_cmd_sc_eop_fifo("m_cmd_sc_eop_fifo",4), 307 m_cmd_sc_srcid_fifo("m_cmd_sc_srcid_fifo",4), 308 m_cmd_sc_trdid_fifo("m_cmd_sc_trdid_fifo",4), 309 m_cmd_sc_pktid_fifo("m_cmd_sc_pktid_fifo",4), 310 m_cmd_sc_wdata_fifo("m_cmd_sc_wdata_fifo",4), 306 311 307 312 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), … … 319 324 r_init_rsp_fsm("r_init_rsp_fsm"), 320 325 r_cleanup_fsm("r_cleanup_fsm"), 321 r_ 
llsc_fsm("r_llsc_fsm"),322 m_ llsc_to_init_cmd_inst_fifo("m_llsc_to_init_cmd_inst_fifo",8),323 m_ llsc_to_init_cmd_srcid_fifo("m_llsc_to_init_cmd_srcid_fifo",8),326 r_sc_fsm("r_sc_fsm"), 327 m_sc_to_init_cmd_inst_fifo("m_sc_to_init_cmd_inst_fifo",8), 328 m_sc_to_init_cmd_srcid_fifo("m_sc_to_init_cmd_srcid_fifo",8), 324 329 #if L1_MULTI_CACHE 325 m_ llsc_to_init_cmd_cache_id_fifo("m_llsc_to_init_cmd_cache_id_fifo",8),330 m_sc_to_init_cmd_cache_id_fifo("m_sc_to_init_cmd_cache_id_fifo",8), 326 331 #endif 327 332 r_ixr_rsp_fsm("r_ixr_rsp_fsm"), … … 346 351 assert(nways); 347 352 353 // check Transaction table size 354 assert( (uint32_log2(transaction_tab_lines) <= vci_param::T) and 355 "Need more bits for VCI TRDID field"); 356 348 357 // Set the broadcast address with Xmin,Xmax,Ymin,Ymax set to maximum 349 358 m_broadcast_address = 0x3 | (0x7C1F << (vci_param::N-20)); … … 405 414 r_write_to_ixr_cmd_data = new sc_signal<data_t>[nwords]; 406 415 407 // Allocation for LLSC FSM408 r_ llsc_to_ixr_cmd_data= new sc_signal<data_t>[nwords];409 r_ llsc_rdata= new sc_signal<data_t>[2];416 // Allocation for SC FSM 417 r_sc_to_ixr_cmd_data = new sc_signal<data_t>[nwords]; 418 r_sc_rdata = new sc_signal<data_t>[2]; 410 419 411 420 … … 422 431 } // end constructor 423 432 424 ////////////////////////////////////////////////// 425 // This function prints a trace of internal states 426 ////////////////////////////////////////////////// 427 428 tmpl(void)::print_trace() 429 { 430 std::cout << "MEM_CACHE " << name() << std::endl; 431 std::cout << " / " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] 432 << " / " << read_fsm_str[r_read_fsm] 433 << " / " << write_fsm_str[r_write_fsm] 434 << " / " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] 435 << " / " << init_cmd_fsm_str[r_init_cmd_fsm] 436 << " / " << init_rsp_fsm_str[r_init_rsp_fsm] << std::endl; 437 } 438 439 ///////////////////////////////////////// 440 // This function prints the statistics 441 ///////////////////////////////////////// 442 443 tmpl(void)::print_stats() 444 { 433 ///////////////////////////////////////////////////// 434 tmpl(void)::cache_monitor( vci_addr_t addr ) 435 ///////////////////////////////////////////////////// 436 { 437 size_t way = 0; 438 DirectoryEntry entry = m_cache_directory.read(addr, way); 439 if ( (entry.count != m_debug_previous_count) or 440 (entry.valid != m_debug_previous_hit) ) 441 { 442 std::cout << " MEMC " << name() 443 << " cache change at cycle " << std::dec << m_cpt_cycles 444 << " for address " << std::hex << addr 445 << " / HIT = " << entry.valid 446 << " / COUNT = " << std::dec << entry.count << std::endl; 447 } 448 m_debug_previous_count = entry.count; 449 m_debug_previous_hit = entry.valid; 450 } 451 452 ////////////////////////////////////////////////// 453 tmpl(void)::print_trace() 454 ////////////////////////////////////////////////// 455 { 456 std::cout << "MEMC " << name() << std::endl; 457 std::cout << " " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] 458 << " | " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] 459 << " | " << read_fsm_str[r_read_fsm] 460 << " | " << write_fsm_str[r_write_fsm] 461 << " | " << sc_fsm_str[r_sc_fsm] 462 << " | " << cleanup_fsm_str[r_cleanup_fsm] << std::endl; 463 std::cout << " " << init_cmd_fsm_str[r_init_cmd_fsm] 464 << " | " << init_rsp_fsm_str[r_init_rsp_fsm] 465 << " | " << ixr_cmd_fsm_str[r_ixr_cmd_fsm] 466 << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm] 467 << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl; 468 } 469 470 ///////////////////////////////////////// 471 tmpl(void)::print_stats() 472 
print_stats() is unchanged apart from the comment banner moving above the function signature; it still prints the miss rates and transaction counters.

transition(): in the RESET section, r_llsc_fsm = LLSC_IDLE becomes r_sc_fsm = SC_IDLE (with the neighbouring assignments re-aligned), and all debug variables are cleared:

    m_debug_global         = false;
    m_debug_tgt_cmd_fsm    = false;
    m_debug_tgt_rsp_fsm    = false;
    m_debug_init_cmd_fsm   = false;
    m_debug_init_rsp_fsm   = false;
    m_debug_read_fsm       = false;
    m_debug_write_fsm      = false;
    m_debug_sc_fsm         = false;
    m_debug_cleanup_fsm    = false;
    m_debug_ixr_cmd_fsm    = false;
    m_debug_ixr_rsp_fsm    = false;
    m_debug_xram_rsp_fsm   = false;
    m_debug_previous_hit   = false;
    m_debug_previous_count = 0;

m_heap_directory.init() becomes m_heap.init(), the six m_cmd_llsc_*_fifo.init() calls become m_cmd_sc_*_fifo.init(), and the r_llsc_* reset values (r_llsc_to_tgt_rsp_req, r_llsc_cpt, r_llsc_lfsr, r_llsc_to_ixr_cmd_req, r_llsc_to_init_cmd_multi_req, r_llsc_to_init_cmd_brdcast_req, plus the m_llsc_to_init_cmd_*_fifo.init() calls) are renamed r_sc_* / m_sc_* with re-aligned assignments. The statistics counters reset is unchanged.
In the body of transition(), the local fifo control variables are renamed (cmd_llsc_fifo_put / cmd_llsc_fifo_get become cmd_sc_fifo_put / cmd_sc_fifo_get, and the llsc_to_init_cmd_fifo_* locals become sc_to_init_cmd_fifo_*), and the run-time debug flags are recomputed every cycle from the constructor arguments:

    m_debug_global       = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
    m_debug_tgt_cmd_fsm  = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;
    // ... same expression for the ten remaining m_debug_*_fsm flags ...
    m_debug_xram_rsp_fsm = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok;

The synthetic all-FSM trace is now guarded by DEBUG_MEMC_GLOBAL and m_debug_global (instead of DEBUG_VCI_MEM_CACHE and DEBUG_START_CYCLE), with " - SC FSM = " << sc_fsm_str[r_sc_fsm] replacing the LLSC line. The old IDEBUG block that dumped the update table before the TGT_CMD FSM is removed.
TGT_CMD FSM: the header comment now lists three accepted command types; the single-cell LL request disappears, and SC requests are 2 or 4 flits:

    ////////////////////////////////////////////////////////////////////////////////////
    // TGT_CMD FSM
    ////////////////////////////////////////////////////////////////////////////////////
    // The TGT_CMD_FSM controls the incoming VCI command packets from the processors
    //
    // There are 3 types of accepted commands :
    // - READ  : a READ request has a length of 1 VCI cell. It can be a single word
    //           or an entire cache line, depending on the PLEN value.
    // - WRITE : a WRITE request has a maximum length of 16 cells, and can only
    //           concern words in a same line.
    // - SC    : The SC request has a length of 2 cells or 4 cells.
    ////////////////////////////////////////////////////////////////////////////////////

The TGT_CMD_READ_EOP state is removed, and malformed commands now terminate the simulation instead of being silently accepted:

    case TGT_CMD_IDLE:
        same command decoding, but an illegal VCI command type now prints
        "VCI_MEM_CACHE ERROR ... illegal VCI command type" and calls exit(0)
        instead of "MemCache error : wrong command"; the commented-out trdid
        test on CMD_WRITE is dropped.

    case TGT_CMD_READ:
        the old ASSERT is replaced by two explicit checks, and the state
        always returns to TGT_CMD_IDLE after pushing the request into the
        read fifo (read commands must be single-flit):

            if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16)
            {
                std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl;
                std::cout << " illegal address/plen combination for VCI read command" << std::endl;
                exit(0);
            }
            if ( !p_vci_tgt.eop.read() )
            {
                std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" << std::endl;
                std::cout << " read command packets must contain one single flit" << std::endl;
                exit(0);
            }

    case TGT_CMD_WRITE:
        unchanged logic (push into the write fifo, back to IDLE on eop).

    case TGT_CMD_ATOMIC:
        a new check enforces the SC packet format before pushing into the
        renamed cmd_sc fifo:

            if ( (p_vci_tgt.plen.read() != 8) && (p_vci_tgt.plen.read() != 16) )
            {
                std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_ATOMIC state" << std::endl;
                std::cout << "illegal format for sc command " << std::endl;
                exit(0);
            }

Each accepting branch gains a conditional trace of the form:

    #if DEBUG_MEMC_TGT_CMD
    if( m_debug_tgt_cmd_fsm )
    {
        std::cout << "  <MEMC.TGT_CMD_READ> Push into read_fifo:"
                  << " address = " << std::hex << p_vci_tgt.address.read()
                  << " srcid = "   << p_vci_tgt.srcid.read()
                  << " trdid = "   << p_vci_tgt.trdid.read()
                  << " plen = "    << p_vci_tgt.plen.read() << std::endl;
    }
    #endif

(the WRITE and ATOMIC traces also print wdata and be).
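The address/plen check in TGT_CMD_READ guarantees that a read burst never crosses a cache-line boundary. A worked example, assuming 16-word (64-byte) lines so that m_x[] extracts the word index inside the line:

    // Hypothetical values illustrating the TGT_CMD_READ check:
    //   m_x[addr] = word index of the first requested word in its line
    //   plen >> 2 = number of requested 32-bit words
    uint32_t word_index = 14;   // request starts at word 14 of the line
    uint32_t plen       = 16;   // 16 bytes = 4 words
    bool ok = (word_index + (plen >> 2)) <= 16;  // 14 + 4 = 18 > 16 -> rejected
    // The same 4-word burst starting at word 12 would pass: 12 + 4 = 16 <= 16.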
INIT_RSP FSM: the header comment is rewritten to name both request sources and to describe the UPT handling:

    /////////////////////////////////////////////////////////////////////////
    // This FSM controls the response to the update or inval coherence
    // requests sent by the memory cache to the L1 caches :
    //
    // It can be update or inval requests initiated by the WRITE FSM,
    // or inval requests initiated by the XRAM_RSP FSM.
    // The FSM decrements the proper entry in UPT.
    // It sends a request to the TGT_RSP FSM to complete the pending
    // write transaction (acknowledge response to the writer processor),
    // and clears the UPT entry when all responses have been received.
    //
    // All those response packets are one word, compact
    // packets complying with the VCI advanced format.
    // The index in the Table is defined in the RTRDID field, and
    // the transaction type is defined in the UPT entry.
    /////////////////////////////////////////////////////////////////////

    case INIT_RSP_IDLE:   // wait a response for a coherence transaction
        the two ASSERT macros become explicit errors followed by exit(0)
        ("index too large for UPT" when p_vci_ini.rtrdid >= m_update_tab.size(),
        "all coherence response packets must be one flit" when reop is not
        set), and a trace reports the UPT entry index found in rtrdid. The
        transitions (to INIT_RSP_UPT_LOCK, or triggered by
        r_write_to_init_rsp_req) are unchanged.

    case INIT_RSP_UPT_LOCK:   // decrement the number of expected responses
        the old IDEBUG dump, the dead "while(!valid);" loop and the ASSERT
        are gone; an unsuccessful decrement is now a fatal error:

            size_t count = 0;
            bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count);

            if ( not valid )
            {
                std::cout << "VCI_MEM_CACHE ERROR " << name() << " INIT_RSP_UPT_LOCK state" << std::endl;
                std::cout << "unsuccessful access to decrement the UPT" << std::endl;
                exit(0);
            }

            if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR;
            else              r_init_rsp_fsm = INIT_RSP_IDLE;

        with a trace of the entry index and the remaining response count.
    case INIT_RSP_UPT_CLEAR:   // clear the UPT entry
        same logic (read back srcid / trdid / pktid / nline, clear the entry,
        go to INIT_RSP_END when a response is needed); the IDEBUG dump of the
        update table is replaced by a one-line trace of the cleared entry
        index.

    case INIT_RSP_END:   // post a request to the TGT_RSP FSM
        unchanged logic, plus a trace naming the srcid the response is
        sent to.
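The INIT_RSP protocol relies on each UPT entry carrying a counter of expected L1 acknowledgements. A minimal sketch of what decrement() plausibly does; the function itself is not part of the hunks shown, and the field names follow update_tab_v4.h:

    // Hypothetical sketch: one UPT entry counts the missing L1 responses.
    bool decrement(const size_t index, size_t &counter)
    {
        if ( !tab[index].valid ) return false;  // stale index: fatal in INIT_RSP
        tab[index].count--;                     // one more L1 cache has answered
        counter = tab[index].count;             // 0 -> last response received,
        return true;                            //      the entry can be cleared
    }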
//////////////////////////////////////////////////////////////////////////////////// 876 // The READ FSM controls the read requests sent by processors.1019 // The READ FSM controls the VCI read requests. 877 1020 // It takes the lock protecting the cache directory to check the cache line status: 878 // - In case of HIT, the fsm copies the data (one line, or one single word) 1021 // - In case of HIT 1022 // The fsm copies the data (one line, or one single word) 879 1023 // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. 880 1024 // The requesting initiator is registered in the cache directory. 881 // - In case of MISS, the READ fsm takes the lock protecting the transaction tab. 1025 // If the number of copy is larger than 1, the new copy is registered 1026 // in the HEAP. 1027 // If the number of copy is larger than the threshold, the HEAP is cleared, 1028 // and the corresponding line switches to the counter mode. 1029 // - In case of MISS 1030 // The READ fsm takes the lock protecting the transaction tab. 882 1031 // If a read transaction to the XRAM for this line already exists, 883 1032 // or if the transaction tab is full, the fsm is stalled. 884 // If a transaction entry is free, the READ fsm sends a request to the XRAM. 1033 // If a TRT entry is free, the READ request is registered in TRT, 1034 // it is consumed in the request FIFO, and transmited to the IXR_CMD FSM. 1035 // The READ FSM returns in the IDLE state as the read transaction will be 1036 // completed when the missing line will be received. 885 1037 //////////////////////////////////////////////////////////////////////////////////// 886 1038 887 PRINTF(" * <MEM_CACHE.TOP> Request from %d.%d at address %llx\n",(uint32_t)m_cmd_read_srcid_fifo.read(),(uint32_t)m_cmd_read_pktid_fifo.read(),(uint64_t)m_cmd_read_addr_fifo.read()); 888 889 switch ( r_read_fsm.read() ) { 890 891 /////////////// 892 case READ_IDLE: 893 { 894 if (m_cmd_read_addr_fifo.rok()) { 895 PRINTF(" * <MEM_CACHE.READ> Request from %d.%d at address %llx\n",(uint32_t)m_cmd_read_srcid_fifo.read(),(uint32_t)m_cmd_read_pktid_fifo.read(),(uint64_t)m_cmd_read_addr_fifo.read()); 896 897 r_read_fsm = READ_DIR_LOCK; 898 } 899 break; 1039 switch ( r_read_fsm.read() ) 1040 { 1041 /////////////// 1042 case READ_IDLE: // waiting a read request 1043 { 1044 if (m_cmd_read_addr_fifo.rok()) 1045 { 1046 1047 #if DEBUG_MEMC_READ 1048 if( m_debug_read_fsm ) 1049 { 1050 std::cout << " <MEMC.READ_IDLE> Read request:" 1051 << " srcid = " << std::hex << m_cmd_read_srcid_fifo.read() 1052 << " / address = " << m_cmd_read_addr_fifo.read() 1053 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1054 } 1055 #endif 1056 r_read_fsm = READ_DIR_LOCK; 1057 } 1058 break; 900 1059 } 901 1060 /////////////////// 902 case READ_DIR_LOCK: // check directory for hit / miss 903 { 904 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { 905 size_t way = 0; 906 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 907 #ifdef DDEBUG 908 if(m_cpt_cycles > DEBUG_START_CYCLE){ 909 std::cout << "In READ_DIR_LOCK printing the entry of address is : " << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 910 entry.print(); 911 std::cout << "done" << std::endl; 912 } 913 #endif 914 r_read_is_cnt = entry.is_cnt; 915 r_read_dirty = entry.dirty; 916 r_read_lock = entry.lock; 917 r_read_tag = entry.tag; 918 r_read_way = way; 919 r_read_count = entry.count; 920 r_read_copy = entry.owner.srcid; 1061 case READ_DIR_LOCK: // check directory for hit 
901 1060 /////////////////// 902 case READ_DIR_LOCK: // check directory for hit / miss 903 { 904 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { 905 size_t way = 0; 906 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 907 #ifdef DDEBUG 908 if(m_cpt_cycles > DEBUG_START_CYCLE){ 909 std::cout << "In READ_DIR_LOCK printing the entry of address is : " << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 910 entry.print(); 911 std::cout << "done" << std::endl; 912 } 913 #endif 914 r_read_is_cnt = entry.is_cnt; 915 r_read_dirty = entry.dirty; 916 r_read_lock = entry.lock; 917 r_read_tag = entry.tag; 918 r_read_way = way; 919 r_read_count = entry.count; 920 r_read_copy = entry.owner.srcid; 1061 case READ_DIR_LOCK: // check directory for hit / miss 1062 { 1063 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1064 { 1065 size_t way = 0; 1066 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 1067 1068 r_read_is_cnt = entry.is_cnt; 1069 r_read_dirty = entry.dirty; 1070 r_read_lock = entry.lock; 1071 r_read_tag = entry.tag; 1072 r_read_way = way; 1073 r_read_count = entry.count; 1074 r_read_copy = entry.owner.srcid; 1075 921 1076 #if L1_MULTI_CACHE 922 r_read_copy_cache = entry.owner.cache_id; 923 #endif 924 r_read_copy_inst = entry.owner.inst; 925 r_read_ptr = entry.ptr; 926 927 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 928 // In case of hit, the read acces must be registered in the copies bit-vector 929 if( entry.valid ) { 930 if(entry.is_cnt || (entry.count == 0) || !cached_read) { // No new entry in the heap 931 r_read_fsm = READ_DIR_HIT; 932 } else { 933 r_read_fsm = READ_HEAP_LOCK; 934 } 935 } else { 936 r_read_fsm = READ_TRT_LOCK; 937 } 938 } 939 break; 1077 r_read_copy_cache = entry.owner.cache_id; 1078 #endif 1079 r_read_copy_inst = entry.owner.inst; 1080 r_read_ptr = entry.ptr; // pointer to the heap 1081 1082 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1083 if( entry.valid ) // hit 1084 { 1085 // test if we need to register a new copy in the heap 1086 if ( entry.is_cnt || (entry.count == 0) || !cached_read ) 1087 r_read_fsm = READ_DIR_HIT; 1088 else 1089 r_read_fsm = READ_HEAP_LOCK; 1090 1091 } 1092 else // miss 1093 { 1094 r_read_fsm = READ_TRT_LOCK; 1095 1096 } 1097 1098 #if DEBUG_MEMC_READ 1099 if( m_debug_read_fsm ) 1100 { 1101 std::cout << " <MEMC.READ_DIR_LOCK> Accessing directory: " 1102 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 1103 << " / hit = " << entry.valid 1104 << " / count = " <<std::dec << entry.count 1105 << " / is_cnt = " << entry.is_cnt << std::endl; 1106 } 1107 #endif 1108 } 1109 break; 940 1108 } 941 1109 ////////////////// 942 case READ_DIR_HIT: // read hit : update the memory cache 943 { 944 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { 945 // signals generation 946 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 947 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 948 bool is_cnt = r_read_is_cnt.read(); 949 950 // read data in the cache 951 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 952 size_t way = r_read_way.read(); 953 for ( size_t i=0 ; i<m_words ; i++ ) { 954 r_read_data[i] = m_cache_data[way][set][i]; 955 } 956 957 // update the cache directory (for the copies) 958 DirectoryEntry entry; 959 entry.valid = true; 960 entry.is_cnt = is_cnt; 961 entry.dirty = r_read_dirty.read(); 962 entry.tag = r_read_tag.read(); 963 entry.lock = r_read_lock.read(); 964 entry.ptr = r_read_ptr.read(); 965 if(cached_read){ // Cached read, we update the copy 966 if(!is_cnt){ // Not counter mode 967 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1110 case READ_DIR_HIT: // read data in cache & update the directory 1111 // we enter this state in 3 cases: 1112 // - the read request is uncachable 1113 // - the cache line is in counter mode 1114 // - the cache line is valid but not replicated 1115 { 1116 if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) 1117 { 1118 // signals generation 1119 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 1120 bool cached_read = (m_cmd_read_trdid_fifo.read() & 0x1); 1121 bool is_cnt = r_read_is_cnt.read(); 1122 1123 // read data in the cache 1124 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1125 size_t way = r_read_way.read(); 1126 for ( size_t i=0 ; i<m_words ; 
i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1127 1128 // update the cache directory 1129 DirectoryEntry entry; 1130 entry.valid = true; 1131 entry.is_cnt = is_cnt; 1132 entry.dirty = r_read_dirty.read(); 1133 entry.tag = r_read_tag.read(); 1134 entry.lock = r_read_lock.read(); 1135 entry.ptr = r_read_ptr.read(); 1136 if (cached_read) // Cached read => we must update the copies 1137 { 1138 if (!is_cnt) // Not counter mode 1139 { 1140 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 968 1141 #if L1_MULTI_CACHE 969 entry.owner.cache_id= m_cmd_read_pktid_fifo.read(); 970 #endif 971 972 entry.owner.inst = inst_read; 973 entry.count = r_read_count.read() + 1; 974 } else { // Counter mode 975 entry.owner.srcid = 0; 1142 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1143 #endif 1144 entry.owner.inst = inst_read; 1145 entry.count = r_read_count.read() + 1; 1146 } 1147 else // Counter mode 1148 { 1149 entry.owner.srcid = 0; 976 1150 #if L1_MULTI_CACHE 977 entry.owner.cache_id= 0; 978 #endif 979 entry.owner.inst = false; 980 entry.count = r_read_count.read() + 1; 981 } 982 } else { // Uncached read 983 entry.owner.srcid = r_read_copy.read(); 1151 entry.owner.cache_id = 0; 1152 #endif 1153 entry.owner.inst = false; 1154 entry.count = r_read_count.read() + 1; 1155 } 1156 } 1157 else // Uncached read 1158 { 1159 entry.owner.srcid = r_read_copy.read(); 984 1160 #if L1_MULTI_CACHE 985 entry.owner.cache_id = r_read_copy_cache.read(); 986 #endif 987 988 entry.owner.inst = r_read_copy_inst.read(); 989 entry.count = r_read_count.read(); 990 } 991 #ifdef DDEBUG 992 if(m_cpt_cycles > DEBUG_START_CYCLE){ 993 std::cout << "In READ_DIR_HIT printing the entry of address is : " << std::endl; 994 entry.print(); 995 std::cout << "done" << std::endl; 996 } 997 #endif 998 999 m_cache_directory.write(set, way, entry); 1000 r_read_fsm = READ_RSP; 1001 } 1002 break; 1161 entry.owner.cache_id = r_read_copy_cache.read(); 1162 #endif 1163 entry.owner.inst = r_read_copy_inst.read(); 1164 entry.count = r_read_count.read(); 1165 } 1166 1167 #if DEBUG_MEMC_READ 1168 if( m_debug_read_fsm ) 1169 { 1170 std::cout << " <MEMC.READ_DIR_HIT> Update directory entry:" 1171 << " set = " << std::dec << set 1172 << " / way = " << way 1173 << " / owner_id = " << entry.owner.srcid 1174 << " / owner_ins = " << entry.owner.inst 1175 << " / count = " << entry.count 1176 << " / is_cnt = " << entry.is_cnt << std::endl; 1177 } 1178 #endif 1179 1180 m_cache_directory.write(set, way, entry); 1181 r_read_fsm = READ_RSP; 1182 } 1183 break; 1184 } 1185 //////////////////// 1186 case READ_HEAP_LOCK: // read data in cache, update the directory 1187 // and prepare the HEAP update 1188 { 1189 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1190 { 1191 // enter counter mode when we reach the limit of copies or the heap is full 1192 bool go_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap.is_full(); 1193 1194 // read data in the cache 1195 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1196 size_t way = r_read_way.read(); 1197 for ( size_t i=0 ; i<m_words ; i++ ) r_read_data[i] = m_cache_data[way][set][i]; 1198 1199 // update the cache directory 1200 DirectoryEntry entry; 1201 entry.valid = true; 1202 entry.is_cnt = go_cnt; 1203 entry.dirty = r_read_dirty.read(); 1204 entry.tag = r_read_tag.read(); 1205 entry.lock = r_read_lock.read(); 1206 entry.count = r_read_count.read() + 1; 1207 1208 if (not go_cnt) // Not entering counter mode 1209 { 1210 entry.owner.srcid = r_read_copy.read(); 1211 #if L1_MULTI_CACHE 1212 
entry.owner.cache_id= r_read_copy_cache.read(); 1213 #endif 1214 entry.owner.inst = r_read_copy_inst.read(); 1215 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 1216 } 1217 else // Entering Counter mode 1218 { 1219 entry.owner.srcid = 0; 1220 #if L1_MULTI_CACHE 1221 entry.owner.cache_id= 0; 1222 #endif 1223 entry.owner.inst = false; 1224 entry.ptr = 0; 1225 } 1226 1227 m_cache_directory.write(set, way, entry); 1228 1229 // prepare the heap update (add an entry, or clear the linked list) 1230 if (not go_cnt) // not switching to counter mode 1231 { 1232 // We test if the next free entry in the heap is the last 1233 HeapEntry heap_entry = m_heap.next_free_entry(); 1234 r_read_next_ptr = heap_entry.next; 1235 r_read_last_free = ( heap_entry.next == m_heap.next_free_ptr() ); 1236 1237 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 1238 } 1239 else // switching to counter mode 1240 { 1241 if ( r_read_count.read()>1 ) // heap must be cleared 1242 { 1243 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1244 r_read_next_ptr = m_heap.next_free_ptr(); 1245 m_heap.write_free_ptr(r_read_ptr.read()); 1246 1247 if( next_entry.next == r_read_ptr.read() ) // last entry 1248 { 1249 r_read_fsm = READ_HEAP_LAST; // erase the entry 1250 } 1251 else // not the last entry 1252 { 1253 r_read_ptr = next_entry.next; 1254 r_read_fsm = READ_HEAP_ERASE; // erase the list 1255 } 1256 } 1257 else // the heap is not used / nothing to do 1258 { 1259 r_read_fsm = READ_RSP; 1260 } 1261 } 1262 1263 #if DEBUG_MEMC_READ 1264 if( m_debug_read_fsm ) 1265 { 1266 std::cout << " <MEMC.READ_HEAP_LOCK> Update directory:" 1267 << " tag = " << std::hex << entry.tag 1268 << " set = " << std::dec << set 1269 << " way = " << way 1270 << " count = " << entry.count 1271 << " is_cnt = " << entry.is_cnt << std::endl; 1272 } 1273 #endif 1274 } 1275 break; 1276 } 1277 ///////////////////// 1278 case READ_HEAP_WRITE: // add an entry in the heap 1279 { 1280 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1281 { 1282 HeapEntry heap_entry; 1283 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1284 #if L1_MULTI_CACHE 1285 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1286 #endif 1287 heap_entry.owner.inst = (m_cmd_read_trdid_fifo.read() & 0x2); 1288 1289 if(r_read_count.read() == 1) // creation of a new linked list 1290 { 1291 heap_entry.next = m_heap.next_free_ptr(); 1292 } 1293 else // head insertion in existing list 1294 { 1295 heap_entry.next = r_read_ptr.read(); 1296 } 1297 m_heap.write_free_entry(heap_entry); 1298 m_heap.write_free_ptr(r_read_next_ptr.read()); 1299 if(r_read_last_free.read()) m_heap.set_full(); 1300 1301 r_read_fsm = READ_RSP; 1302 1303 #if DEBUG_MEMC_READ 1304 if( m_debug_read_fsm ) 1305 { 1306 std::cout << " <MEMC.READ_HEAP_WRITE> Add an entry in the heap:" 1307 << " owner_id = " << heap_entry.owner.srcid 1308 << " owner_ins = " << heap_entry.owner.inst << std::endl; 1309 } 1310 #endif 1311 } 1312 else 1313 { 1314 std::cout << "VCI_MEM_CACHE ERROR " << name() 1315 << " READ_HEAP_WRITE state" << std::endl; 1316 std::cout << "Bad HEAP allocation" << std::endl; 1317 exit(0); 1318 } 1319 break; 1320 } 1321 ///////////////////// 1322 case READ_HEAP_ERASE: 1323 { 1324 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1325 { 1326 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 1327 if( next_entry.next == r_read_ptr.read() ) 1328 { 1329 r_read_fsm = READ_HEAP_LAST; 1330 } 1331 else 1332 { 1333 r_read_ptr = next_entry.next; 1334 r_read_fsm = READ_HEAP_ERASE; 1335 } 1336 } 
1337 else 1338 { 1339 std::cout << "VCI_MEM_CACHE ERROR " << name() 1340 << " READ_HEAP_ERASE state" << std::endl; 1341 std::cout << "Bad HEAP allocation" << std::endl; 1342 exit(0); 1343 } 1344 break; 1345 } 1346 //////////////////// 1347 case READ_HEAP_LAST: 1348 { 1349 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) 1350 { 1351 HeapEntry last_entry; 1352 last_entry.owner.srcid = 0; 1353 #if L1_MULTI_CACHE 1354 last_entry.owner.cache_id = 0; 1355 #endif 1356 last_entry.owner.inst = false; 1357 1358 if(m_heap.is_full()) 1359 { 1360 last_entry.next = r_read_ptr.read(); 1361 m_heap.unset_full(); 1362 } 1363 else 1364 { 1365 last_entry.next = r_read_next_ptr.read(); 1366 } 1367 m_heap.write(r_read_ptr.read(),last_entry); 1368 r_read_fsm = READ_RSP; 1369 } 1370 else 1371 { 1372 std::cout << "VCI_MEM_CACHE ERROR " << name() 1373 << " READ_HEAP_LAST state" << std::endl; 1374 std::cout << "Bad HEAP allocation" << std::endl; 1375 exit(0); 1376 } 1377 break; 1003 1378 } 1004 1379 ////////////// 1005 case READ_HEAP_LOCK: 1006 { 1007 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_READ ) { 1008 bool is_cnt = (r_read_count.read() >= r_copies_limit.read()) || m_heap_directory.is_full(); 1009 // read data in the cache 1010 size_t set = m_y[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1011 size_t way = r_read_way.read(); 1012 for ( size_t i=0 ; i<m_words ; i++ ) { 1013 r_read_data[i] = m_cache_data[way][set][i]; 1014 } 1015 1016 // update the cache directory (for the copies) 1017 DirectoryEntry entry; 1018 entry.valid = true; 1019 entry.is_cnt = is_cnt; // when we reach the limit of copies or the heap is full 1020 entry.dirty = r_read_dirty.read(); 1021 entry.tag = r_read_tag.read(); 1022 entry.lock = r_read_lock.read(); 1023 if(!is_cnt){ // Not counter mode 1024 entry.owner.srcid = r_read_copy.read(); 1025 #if L1_MULTI_CACHE 1026 entry.owner.cache_id= r_read_copy_cache.read(); 1027 #endif 1028 entry.owner.inst = r_read_copy_inst.read(); 1029 entry.count = r_read_count.read() + 1; 1030 entry.ptr = m_heap_directory.next_free_ptr(); 1031 } else { // Counter mode 1032 entry.owner.srcid = 0; 1033 #if L1_MULTI_CACHE 1034 entry.owner.cache_id= 0; 1035 #endif 1036 entry.owner.inst = false; 1037 entry.count = r_read_count.read() + 1; 1038 entry.ptr = 0; 1039 } 1040 #ifdef DDEBUG 1041 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1042 std::cout << "In READ_HEAP_LOCK printing the entry of address is : " << std::endl; 1043 entry.print(); 1044 std::cout << "done" << std::endl; 1045 } 1046 #endif 1047 1048 m_cache_directory.write(set, way, entry); 1049 1050 if(!is_cnt){ 1051 HeapEntry free_heap_entry = m_heap_directory.next_free_entry(); 1052 r_read_next_ptr = free_heap_entry.next; 1053 if( free_heap_entry.next == m_heap_directory.next_free_ptr() ) { // Last free heap entry 1054 r_read_last_free = true; 1055 } else { 1056 r_read_last_free = false; 1057 } 1058 r_read_fsm = READ_HEAP_WRITE; // we add an entry in the list of copies 1059 } else { 1060 if(r_read_count.read()>1) { // else there is no list of copies... 
1061 HeapEntry next_entry = m_heap_directory.read(r_read_ptr.read()); 1062 r_read_next_ptr = m_heap_directory.next_free_ptr(); 1063 m_heap_directory.write_free_ptr(r_read_ptr.read()); 1064 if( next_entry.next == r_read_ptr.read() ) { // The last list member 1065 r_read_fsm = READ_HEAP_LAST; // we erase the list of copies (counter mode) 1066 } else { // Not the end of the list 1067 r_read_ptr = next_entry.next; 1068 r_read_fsm = READ_HEAP_ERASE; // we erase the list of copies (counter mode) 1069 } 1070 } else { 1071 r_read_fsm = READ_RSP; 1072 } 1073 } 1074 } 1075 break; 1076 } 1077 ////////////// 1078 case READ_HEAP_WRITE: 1079 { 1080 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ 1081 bool inst_read = (m_cmd_read_trdid_fifo.read() & 0x2); 1082 HeapEntry new_heap_entry; 1083 new_heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 1084 #if L1_MULTI_CACHE 1085 new_heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 1086 #endif 1087 new_heap_entry.owner.inst = inst_read; 1088 if(r_read_count.read() == 1){ // creation of a new list 1089 new_heap_entry.next = m_heap_directory.next_free_ptr(); 1090 } else { // it is an insertion 1091 new_heap_entry.next = r_read_ptr.read(); 1092 } 1093 m_heap_directory.write_free_entry(new_heap_entry); 1094 m_heap_directory.write_free_ptr(r_read_next_ptr.read()); 1095 if(r_read_last_free.read()) { 1096 m_heap_directory.set_full(); 1097 } 1098 1099 r_read_fsm = READ_RSP; 1100 } else { 1101 ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); 1102 } 1103 break; 1104 } 1105 ////////////// 1106 case READ_HEAP_ERASE: 1107 { 1108 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ 1109 HeapEntry next_entry = m_heap_directory.read(r_read_ptr.read()); 1110 if( next_entry.next == r_read_ptr.read() ){ 1111 r_read_fsm = READ_HEAP_LAST; 1112 } else { 1113 r_read_ptr = next_entry.next; 1114 r_read_fsm = READ_HEAP_ERASE; 1115 } 1116 } else { 1117 ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); 1118 } 1119 break; 1120 } 1121 ////////////// 1122 case READ_HEAP_LAST: 1123 { 1124 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ){ 1125 HeapEntry last_entry; 1126 last_entry.owner.srcid = 0; 1127 #if L1_MULTI_CACHE 1128 last_entry.owner.cache_id = 0; 1129 #endif 1130 last_entry.owner.inst = false; 1131 if(m_heap_directory.is_full()){ 1132 last_entry.next = r_read_ptr.read(); 1133 m_heap_directory.unset_full(); 1134 } else { 1135 last_entry.next = r_read_next_ptr.read(); 1136 } 1137 m_heap_directory.write(r_read_ptr.read(),last_entry); 1138 r_read_fsm = READ_RSP; 1139 } else { 1140 ASSERT(false,"MEMCACHE Error : Bad HEAP allocation"); 1141 } 1142 break; 1143 } 1144 ////////////// 1145 case READ_RSP: // request the TGT_RSP FSM to return data 1146 { 1147 if( !r_read_to_tgt_rsp_req ) { 1148 for ( size_t i=0 ; i<m_words ; i++ ) { 1149 r_read_to_tgt_rsp_data[i] = r_read_data[i]; 1150 } 1151 r_read_to_tgt_rsp_word = m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; 1152 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 1153 cmd_read_fifo_get = true; 1154 r_read_to_tgt_rsp_req = true; 1155 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 1156 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1157 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1158 r_read_fsm = READ_IDLE; 1159 } 1160 break; 1380 case READ_RSP: // request the TGT_RSP FSM to return data 1381 { 1382 if( !r_read_to_tgt_rsp_req ) 1383 { 1384 for ( size_t i=0 ; i<m_words ; i++ ) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 1385 r_read_to_tgt_rsp_word = 
m_x[(vci_addr_t)m_cmd_read_addr_fifo.read()]; 1386 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 1387 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 1388 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1389 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1390 cmd_read_fifo_get = true; 1391 r_read_to_tgt_rsp_req = true; 1392 r_read_fsm = READ_IDLE; 1393 1394 #if DEBUG_MEMC_READ 1395 if( m_debug_read_fsm ) 1396 { 1397 std::cout << " <MEMC.READ_RSP> Request the TGT_RSP FSM to return data:" 1398 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 1399 << " / address = " << m_cmd_read_addr_fifo.read() 1400 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1401 } 1402 #endif 1403 } 1404 break; 1161 1405 } 1162 1406 /////////////////// 1163 case READ_TRT_LOCK: // read miss : check the Transaction Table 1164 { 1165 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { 1166 #ifdef TDEBUG 1167 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1168 std::cout << sc_time_stamp() << " " << name() << " READ_TRT_LOCK " << std::endl; 1169 } 1170 #endif 1171 size_t index = 0; 1172 bool hit_read = m_transaction_tab.hit_read(m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], index); 1173 bool hit_write = m_transaction_tab.hit_write(m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]); 1174 bool wok = !m_transaction_tab.full(index); 1175 if( hit_read || !wok || hit_write ) { // missing line already requested or no space 1176 if(!wok) 1177 m_cpt_trt_full++; 1178 if(hit_read || hit_write) 1179 m_cpt_trt_rb++; 1180 r_read_fsm = READ_IDLE; 1181 } else { // missing line is requested to the XRAM 1182 m_cpt_read_miss++; 1183 r_read_trt_index = index; 1184 r_read_fsm = READ_TRT_SET; 1185 } 1186 } 1187 break; 1407 case READ_TRT_LOCK: // read miss : check the Transaction Table 1408 { 1409 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1410 { 1411 size_t index = 0; 1412 vci_addr_t addr = (vci_addr_t)m_cmd_read_addr_fifo.read(); 1413 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], index); 1414 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 1415 bool wok = !m_transaction_tab.full(index); 1416 1417 if( hit_read || !wok || hit_write ) // missing line already requested or no space 1418 { 1419 if(!wok) m_cpt_trt_full++; 1420 if(hit_read || hit_write) m_cpt_trt_rb++; 1421 r_read_fsm = READ_IDLE; 1422 } 1423 else // missing line is requested to the XRAM 1424 { 1425 m_cpt_read_miss++; 1426 r_read_trt_index = index; 1427 r_read_fsm = READ_TRT_SET; 1428 } 1429 1430 #if DEBUG_MEMC_READ 1431 if( m_debug_read_fsm ) 1432 { 1433 std::cout << " <MEMC.READ_TRT_LOCK> Check TRT:" 1434 << " hit_read = " << hit_read 1435 << " / hit_write = " << hit_write 1436 << " / full = " << !wok << std::endl; 1437 } 1438 #endif 1439 } 1440 break; 1188 1441 } 1189 1442 ////////////////// 1190 case READ_TRT_SET: 1191 { 1192 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { 1193 m_transaction_tab.set(r_read_trt_index.read(), 1194 true, 1195 m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1196 m_cmd_read_srcid_fifo.read(), 1197 m_cmd_read_trdid_fifo.read(), 1198 m_cmd_read_pktid_fifo.read(), 1199 true, 1200 m_cmd_read_length_fifo.read(), 1201 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1202 std::vector<be_t>(m_words,0), 1203 std::vector<data_t>(m_words,0)); 1204 #ifdef TDEBUG 1205 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1206 std::cout << sc_time_stamp() << " " << name() << " READ_TRT_SET transaction table : " << std::endl; 1207 for(size_t i = 0 ; i < 
m_transaction_tab.size() ; i++) 1208 m_transaction_tab.print(i); 1209 } 1210 #endif 1211 1212 r_read_fsm = READ_XRAM_REQ; 1213 } 1214 break; 1215 } 1216 ///////////////////// 1217 case READ_XRAM_REQ: 1218 { 1219 if( !r_read_to_ixr_cmd_req ) { 1220 cmd_read_fifo_get = true; 1221 r_read_to_ixr_cmd_req = true; 1222 r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1223 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 1224 r_read_fsm = READ_IDLE; 1225 } 1226 break; 1443 case READ_TRT_SET: // register the GET transaction in TRT 1444 { 1445 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) 1446 { 1447 m_transaction_tab.set(r_read_trt_index.read(), 1448 true, 1449 m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1450 m_cmd_read_srcid_fifo.read(), 1451 m_cmd_read_trdid_fifo.read(), 1452 m_cmd_read_pktid_fifo.read(), 1453 true, 1454 m_cmd_read_length_fifo.read(), 1455 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1456 std::vector<be_t>(m_words,0), 1457 std::vector<data_t>(m_words,0)); 1458 #if DEBUG_MEMC_READ 1459 if( m_debug_read_fsm ) 1460 { 1461 std::cout << " <MEMC.READ_TRT_SET> Write in Transaction Table: " << std::hex 1462 << " address = " << m_cmd_read_addr_fifo.read() 1463 << " / srcid = " << m_cmd_read_srcid_fifo.read() << std::endl; 1464 } 1465 #endif 1466 r_read_fsm = READ_TRT_REQ; 1467 } 1468 break; 1469 } 1470 ////////////////// 1471 case READ_TRT_REQ: // consume the read request in the FIFO, 1472 // and send it to the ixr_cmd_fsm 1473 { 1474 if( not r_read_to_ixr_cmd_req ) 1475 { 1476 cmd_read_fifo_get = true; 1477 r_read_to_ixr_cmd_req = true; 1478 r_read_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_read_addr_fifo.read())]; 1479 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 1480 r_read_fsm = READ_IDLE; 1481 1482 #if DEBUG_MEMC_READ 1483 if( m_debug_read_fsm ) 1484 { 1485 std::cout << " <MEMC.READ_TRT_REQ> Request GET transaction for address " 1486 << m_cmd_read_addr_fifo.read() << std::endl; 1487 } 1488 #endif 1489 } 1490 break; 1227 1491 } 1228 1492 } // end switch read_fsm … 1240 1504 // If there is no other copy, an acknowledge response is immediately 1241 1505 // returned to the writing processor. 1242 // if the data is cached by other processoris, the FSM takes the lock 1243 // protecting the Update Table (UPT) to register this update transaction. 1244 // If the UPT is full, it releases the lock and waits. Then, it sends 1506 // If the data is cached by other processors, a coherence transaction must 1507 // be launched: 1508 // It is a multicast update if the line is not in counter mode, and the processor 1509 // takes the lock protecting the Update Table (UPT) to register this transaction. 1510 // It is a broadcast invalidate if the line is in counter mode. 1511 // If the UPT is full, it releases the lock(s) and retries. Then, it sends 1245 1512 // a multi-update request to all owners of the line (but the writer), 1246 1513 // through the INIT_CMD FSM. In case of multi-update transaction, the WRITE FSM … 1256 1523 ///////////////////////////////////////////////////////////////////////////////////// 1257 1524
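Editor's note: the header comment above compresses the decision rule that the WRITE_DIR_LOCK / WRITE_DIR_HIT states implement further down. A hedged, stand-alone restatement in C++ (the names are hypothetical; the conditions are taken from the code):

    #include <cstddef>

    enum write_action_e { WR_LOCAL_ONLY, WR_MULTI_UPDATE, WR_BROADCAST_INVAL };

    // count  : number of registered copies of the line
    // owner  : the writer is itself the single registered data copy
    // is_cnt : the line is in counter mode (individual copies not known)
    write_action_e write_hit_action( size_t count, bool owner, bool is_cnt )
    {
        bool no_update = (count == 0) || ( owner && (count == 1) );
        if ( no_update ) return WR_LOCAL_ONLY;      // write in cache, acknowledge immediately
        if ( !is_cnt )   return WR_MULTI_UPDATE;    // register in UPT, update every copy via INIT_CMD
        return WR_BROADCAST_INVAL;                  // counter mode: broadcast invalidate, then put to XRAM
    }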
1258 switch ( r_write_fsm.read() ) { 1259 1260 //////////////// 1261 case WRITE_IDLE: // copy first word of a write burst in local buffer 1262 { 1263 if ( m_cmd_write_addr_fifo.rok()) { 1264 PRINTF(" * <MEM_CACHE.WRITE> KANE Request from %d.%d (%d) at address %llx\n",(uint32_t)m_cmd_write_srcid_fifo.read(),(uint32_t)m_cmd_write_pktid_fifo.read(),(uint32_t) m_cmd_write_trdid_fifo.read(), (uint64_t)m_cmd_write_addr_fifo.read()); 1265 1266 m_cpt_write++; 1267 m_cpt_write_cells++; 1268 // consume a word in the FIFO & write it in the local buffer 1269 cmd_write_fifo_get = true; 1270 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1271 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1272 r_write_word_index = index; 1273 r_write_word_count = 1; 1274 r_write_data[index] = m_cmd_write_data_fifo.read(); 1275 r_write_srcid = m_cmd_write_srcid_fifo.read(); 1276 r_write_trdid = m_cmd_write_trdid_fifo.read(); 1277 r_write_pktid = m_cmd_write_pktid_fifo.read(); 1278 1279 // the be field must be set for all words 1280 for ( size_t i=0 ; i<m_words ; i++ ) { 1281 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 1282 else r_write_be[i] = 0x0; 1283 } 1284 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1285 r_write_byte=true; 1286 else r_write_byte=false; 1287 1288 if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1289 else r_write_fsm = WRITE_NEXT; 1290 } 1291 break; 1292 } 1525 switch ( r_write_fsm.read() ) 1526 { 1293 1527 //////////////// 1294 case WRITE_NEXT: // copy next word of a write burst in local buffer 1295 { 1296 if ( m_cmd_write_addr_fifo.rok() ) { 1297 m_cpt_write_cells++; 1298 1299 // check that the next word is in the same cache line 1300 ASSERT( (m_nline[(vci_addr_t)(r_write_address.read())] == m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) 1301 ,"VCI_MEM_CACHE write error in vci_mem_cache : write burst over a line" ); 1302 // consume a word in the FIFO & write it in the local buffer 1303 cmd_write_fifo_get=true; 1304 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1305 r_write_be[index] = m_cmd_write_be_fifo.read(); 1306 r_write_data[index] = m_cmd_write_data_fifo.read(); 1307 r_write_word_count = r_write_word_count.read() + 1; 1308 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1309 r_write_byte=true; 1310 if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1311 } 1312 break; 1528 case WRITE_IDLE: // copy first word of a write burst in local buffer 1529 { 1530 if ( m_cmd_write_addr_fifo.rok() ) 1531 { 1532 m_cpt_write++; 1533 m_cpt_write_cells++; 1534 1535 // consume a word in the FIFO & write it in the local buffer 1536 cmd_write_fifo_get = true; 1537 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1538 1539 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1540 r_write_word_index = index; 1541 r_write_word_count = 1; 1542 r_write_data[index] = m_cmd_write_data_fifo.read(); 1543 r_write_srcid = m_cmd_write_srcid_fifo.read(); 1544 r_write_trdid = m_cmd_write_trdid_fifo.read(); 1545 r_write_pktid = m_cmd_write_pktid_fifo.read(); 1546 1547 // initialize the be field for all words 1548 for ( size_t i=0 ; i<m_words ; i++ ) 1549 { 1550 if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); 1551 else r_write_be[i] = 0x0; 1552 } 1553 1554 if( 
!((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1555 r_write_byte = true; 1556 else 1557 r_write_byte = false; 1558 1559 if( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1560 else r_write_fsm = WRITE_NEXT; 1561 1562 #if DEBUG_MEMC_WRITE 1563 if( m_debug_write_fsm ) 1564 { 1565 std::cout << " <MEMC.WRITE_IDLE> Write request " 1566 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 1567 << " / address = " << m_cmd_write_addr_fifo.read() 1568 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 1569 } 1570 #endif 1571 } 1572 break; 1573 } 1574 //////////////// 1575 case WRITE_NEXT: // copy next word of a write burst in local buffer 1576 { 1577 if ( m_cmd_write_addr_fifo.rok() ) 1578 { 1579 1580 #if DEBUG_MEMC_WRITE 1581 if( m_debug_write_fsm ) 1582 { 1583 std::cout << " <MEMC.WRITE_NEXT> Write another word in local buffer" << std::endl; 1584 } 1585 #endif 1586 m_cpt_write_cells++; 1587 1588 // check that the next word is in the same cache line 1589 if ( (m_nline[(vci_addr_t)(r_write_address.read())] != 1590 m_nline[(vci_addr_t)(m_cmd_write_addr_fifo.read())]) ) 1591 { 1592 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl; 1593 std::cout << "all words in a write burst must be in same cache line" << std::endl; 1594 exit(0); 1595 } 1596 1597 // consume a word in the FIFO & write it in the local buffer 1598 cmd_write_fifo_get=true; 1599 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1600 1601 r_write_be[index] = m_cmd_write_be_fifo.read(); 1602 r_write_data[index] = m_cmd_write_data_fifo.read(); 1603 r_write_word_count = r_write_word_count.read() + 1; 1604 1605 if( !((m_cmd_write_be_fifo.read() == 0x0)||(m_cmd_write_be_fifo.read() == 0xF)) ) 1606 r_write_byte = true; 1607 1608 if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; 1609 } 1610 break; 1313 1611 } 1314 1612 //////////////////// 1315 case WRITE_DIR_LOCK: // access directory to check hit/miss 1316 { 1317 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) { 1318 size_t way = 0; 1319 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 1320 1321 // copy directory entry in local buffers in case of hit 1322 if ( entry.valid ) { 1323 r_write_is_cnt = entry.is_cnt; 1324 r_write_lock = entry.lock; 1325 r_write_tag = entry.tag; 1326 r_write_copy = entry.owner.srcid; 1613 case WRITE_DIR_LOCK: // access directory to check hit/miss 1614 { 1615 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 1616 { 1617 size_t way = 0; 1618 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 1619 1620 if ( entry.valid ) // hit 1621 { 1622 // copy directory entry in local buffer in case of hit 1623 r_write_is_cnt = entry.is_cnt; 1624 r_write_lock = entry.lock; 1625 r_write_tag = entry.tag; 1626 r_write_copy = entry.owner.srcid; 1327 1627 #if L1_MULTI_CACHE 1328 r_write_copy_cache= entry.owner.cache_id; 1329 #endif 1330 r_write_copy_inst = entry.owner.inst; 1331 r_write_count = entry.count; 1332 r_write_ptr = entry.ptr; 1333 r_write_way = way; 1334 if( entry.is_cnt && entry.count ) { 1335 r_write_fsm = WRITE_DIR_HIT_READ; 1336 } else { 1337 if(r_write_byte.read()) 1338 r_write_fsm = WRITE_DIR_HIT_READ; 1339 else r_write_fsm = WRITE_DIR_HIT; 1340 } 1341 } else { 1342 r_write_fsm = WRITE_TRT_LOCK; 1343 } 1344 } 1345 break; 1628 r_write_copy_cache = entry.owner.cache_id; 1629 #endif 1630 r_write_copy_inst = entry.owner.inst; 1631 r_write_count = entry.count; 1632 r_write_ptr = entry.ptr; 1633 
r_write_way = way; 1634 1635 if( entry.is_cnt && entry.count ) 1636 { 1637 r_write_fsm = WRITE_DIR_HIT_READ; 1638 } 1639 else 1640 { 1641 if (r_write_byte.read()) r_write_fsm = WRITE_DIR_HIT_READ; 1642 else r_write_fsm = WRITE_DIR_HIT; 1643 } 1644 } 1645 else // miss 1646 { 1647 r_write_fsm = WRITE_TRT_LOCK; 1648 } 1649 1650 #if DEBUG_MEMC_WRITE 1651 if( m_debug_write_fsm ) 1652 { 1653 std::cout << " <MEMC.WRITE_DIR_LOCK> Check the directory: " 1654 << " address = " << r_write_address.read() 1655 << " hit = " << entry.valid 1656 << " count = " << std::dec << entry.count 1657 << " is_cnt = " << entry.is_cnt << std::endl; 1658 } 1659 #endif 1660 } 1661 break; 1662 } 1663 //////////////////////// 1664 case WRITE_DIR_HIT_READ: // read the cache and complete the buffer when be!=0xF 1665 { 1666 // update local buffer 1667 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1668 size_t way = r_write_way.read(); 1669 for(size_t i=0 ; i<m_words ; i++) 1670 { 1671 data_t mask = 0; 1672 if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; 1673 if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; 1674 if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; 1675 if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; 1676 1677 // complete only if mask is not null (for energy consumption) 1678 if ( r_write_be[i].read() || r_write_is_cnt.read() ) 1679 { 1680 r_write_data[i] = (r_write_data[i].read() & mask) | 1681 (m_cache_data[way][set][i] & ~mask); 1682 } 1683 } // end for 1684 1685 // test if a coherence broadcast is required 1686 if( r_write_is_cnt.read() && r_write_count.read() ) r_write_fsm = WRITE_TRT_WRITE_LOCK; 1687 else r_write_fsm = WRITE_DIR_HIT; 1688 1689 #if DEBUG_MEMC_WRITE 1690 if( m_debug_write_fsm ) 1691 { 1692 if( r_write_is_cnt.read() && r_write_count.read() ) 1693 { 1694 std::cout << " <MEMC.WRITE_DIR_HIT_READ> Read the cache to complete local buffer /" 1695 << " coherence broadcast required" << std::endl; 1696 } 1697 else 1698 { 1699 std::cout << " <MEMC.WRITE_DIR_HIT_READ> Read the cache to complete local buffer" 1700 << std::endl; 1701 } 1702 } 1703 #endif 1704 break; 1346 1705 }
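Editor's note: the byte-enable handling in WRITE_DIR_HIT_READ above merges the freshly written bytes with the bytes already present in the cached word. A minimal stand-alone equivalent (hypothetical names, not part of the changeset):

    #include <cstdint>

    // Expand a 4-bit VCI byte-enable into a 32-bit mask (bit i guards byte i).
    uint32_t be2mask( uint32_t be )
    {
        uint32_t mask = 0;
        if ( be & 0x1 ) mask |= 0x000000FF;
        if ( be & 0x2 ) mask |= 0x0000FF00;
        if ( be & 0x4 ) mask |= 0x00FF0000;
        if ( be & 0x8 ) mask |= 0xFF000000;
        return mask;
    }

    // Written bytes come from the local buffer, the others from the cached word.
    uint32_t merge_word( uint32_t wdata, uint32_t cached, uint32_t be )
    {
        uint32_t mask = be2mask( be );
        return (wdata & mask) | (cached & ~mask);
    }

    // e.g. merge_word(0x000000AB, 0x11223344, 0x1) == 0x112233AB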
1347 1706 /////////////////// 1348 case WRITE_DIR_HIT_READ: // read the cache and complete the buffer (data, when be!=0xF) 1349 { 1350 // update local buffer 1351 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1352 size_t way = r_write_way.read(); 1353 for(size_t i=0 ; i<m_words ; i++) { 1354 data_t mask = 0; 1355 if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; 1356 if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; 1357 if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; 1358 if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; 1359 if(r_write_be[i].read()||r_write_is_cnt.read()) { // complete only if mask is not null (for energy consumption) 1360 r_write_data[i] = (r_write_data[i].read() & mask) | 1361 (m_cache_data[way][set][i] & ~mask); 1362 } 1363 } // end for 1364 1365 if( r_write_is_cnt.read() && r_write_count.read() ) { 1366 r_write_fsm = WRITE_TRT_WRITE_LOCK; 1367 } else { 1368 r_write_fsm = WRITE_DIR_HIT; 1369 } 1370 break; 1371 } 1372 /////////////////// 1373 case WRITE_DIR_HIT: // update the cache (data & dirty bit) 1374 { 1375 // update directory with Dirty bit 1376 DirectoryEntry entry; 1377 entry.valid = true; 1378 entry.dirty = true; 1379 entry.tag = r_write_tag.read(); 1380 entry.is_cnt = r_write_is_cnt.read(); 1381 entry.lock = r_write_lock.read(); 1382 entry.owner.srcid = r_write_copy.read(); 1707 case WRITE_DIR_HIT: // update the cache (data & directory) 1708 { 1709 // update directory with Dirty bit 1710 DirectoryEntry entry; 1711 entry.valid = true; 1712 entry.dirty = true; 1713 entry.tag = r_write_tag.read(); 1714 entry.is_cnt = r_write_is_cnt.read(); 1715 entry.lock = r_write_lock.read(); 1716 entry.owner.srcid = r_write_copy.read(); 1383 1717 #if L1_MULTI_CACHE 1384 entry.owner.cache_id= r_write_copy_cache.read(); 1385 #endif 1386 entry.owner.inst = r_write_copy_inst.read(); 1387 entry.count = r_write_count.read(); 1388 entry.ptr = r_write_ptr.read(); 1389 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1390 size_t way = r_write_way.read(); 1391 m_cache_directory.write(set, way, entry); 1392 1393 bool owner = (((r_write_copy.read()==r_write_srcid.read()) 1718 entry.owner.cache_id = r_write_copy_cache.read(); 1719 #endif 1720 entry.owner.inst = r_write_copy_inst.read(); 1721 entry.count = r_write_count.read(); 1722 entry.ptr = r_write_ptr.read(); 1723 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1724 size_t way = r_write_way.read(); 1725 1726 // update directory 1727 m_cache_directory.write(set, way, entry); 1728 1729 // owner is true when the writer is itself the registered owner of the line 1730 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 1394 1731 #if L1_MULTI_CACHE 1395 1732 and (r_write_copy_cache.read()==r_write_pktid.read()) 1396 1733 … 1397 1734 ) and not r_write_copy_inst.read()); 1398 1735 1399 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 1400 1401 if( no_update ) // no update 1402 { 1403 // write data in cache 1404 for(size_t i=0 ; i<m_words ; i++) { 1405 if ( r_write_be[i].read() ) { 1406 m_cache_data[way][set][i] = r_write_data[i].read(); 1407 } 1408 } // end for 1409 } 1410 1411 size_t count_signal = r_write_count.read(); 1412 if(owner){ 1413 count_signal = count_signal - 1; 1414 } 1415 r_write_count = count_signal; 1416 r_write_to_dec = false; 1417 1418 if ( no_update ) r_write_fsm = WRITE_RSP; 1419 else 1736 // no_update is true when no coherence transaction is needed 1737 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 1738 1739 // write data in the cache if no transaction on the coherence network 1740 if( no_update ) 1741 { 1742 for(size_t i=0 ; i<m_words ; i++) 1743 { 1744 if ( r_write_be[i].read() ) m_cache_data[way][set][i] = r_write_data[i].read(); 1745 } 1746 } 1747 1748 if ( owner ) r_write_count = r_write_count.read() - 1; 1749 r_write_to_dec = false; 1750 1751 if ( no_update ) // Write transaction completed 1752 { 1753 r_write_fsm = WRITE_RSP; 1754 } 1755 else // coherence update required 1756 { 1757 if( !r_write_to_init_cmd_multi_req.read() && 1758 !r_write_to_init_cmd_brdcast_req.read() ) r_write_fsm = WRITE_UPT_LOCK; 1759 else r_write_fsm = WRITE_WAIT; 1760 } 1761 1762 #if DEBUG_MEMC_WRITE 1763 if( m_debug_write_fsm ) 1764 { 1765 if ( no_update ) 1766 { 1767 std::cout << " <MEMC.WRITE_DIR_HIT> Write into cache / No coherence transaction" 1768 << std::endl; 1769 } 1770 else 1771 { 1772 std::cout << " <MEMC.WRITE_DIR_HIT> Coherence update required:" 1773 << " is_cnt = " << r_write_is_cnt.read() 1774 << " count = " << std::dec << r_write_count.read() 1775 << std::endl; 1776 } 1777 } 1778 #endif 1779 break; 1780 }
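Editor's note: for readers unfamiliar with the Update Table, the set() call used in WRITE_UPT_LOCK below registers a record of roughly this shape (a simplified, hypothetical mirror of the real interface; the real set() returns false when the UPT is full, which sends the FSM to WRITE_WAIT):

    #include <cstddef>
    #include <cstdint>

    struct upt_entry_t {
        bool     update;              // true: multicast update / false: broadcast invalidate
        bool     brdcast;             // true for a broadcast transaction
        bool     rsp;                 // a response must be returned to the writer
        uint32_t srcid, trdid, pktid; // VCI identifiers of the writing processor
        uint64_t nline;               // index of the cache line
        size_t   count;               // number of coherence acknowledges still expected
    };

    // WRITE_UPT_LOCK registers   { update=true,  brdcast=false, rsp=true, ... },
    // WRITE_INVAL_LOCK registers { update=false, brdcast=true,  rsp=true, ... }.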
1781 ///////////////////// 1782 case WRITE_UPT_LOCK: // Try to register the update request in UPT 1783 { 1784 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 1785 { 1786 bool wok = false; 1787 size_t index = 0; 1788 size_t srcid = r_write_srcid.read(); 1789 size_t trdid = r_write_trdid.read(); 1790 size_t pktid = r_write_pktid.read(); 1791 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1792 size_t nb_copies = r_write_count.read(); 1793 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1794 size_t way = r_write_way.read(); 1795 1796 wok = m_update_tab.set(true, // it's an update transaction 1797 false, // it's not a broadcast 1798 true, // it needs a response 1799 srcid, 1800 trdid, 1801 pktid, 1802 nline, 1803 nb_copies, 1804 index); 1805 if ( wok ) // write data in cache 1806 { 1807 for(size_t i=0 ; i<m_words ; i++) 1808 { 1809 if ( r_write_be[i].read() ) m_cache_data[way][set][i] = r_write_data[i].read(); 1810 } 1811 } 1812 1813 #if DEBUG_MEMC_WRITE 1814 if( m_debug_write_fsm ) 1815 { 1816 if ( wok ) 1817 { 1818 std::cout << " <MEMC.WRITE_UPT_LOCK> Register the multicast update in UPT / " 1819 << " nb_copies = " << r_write_count.read() << std::endl; 1820 //m_update_tab.print(); 1821 } 1822 } 1823 #endif 1824 r_write_upt_index = index; 1825 // releases the lock protecting the Update Table and the Directory if no entry... 1826 if ( wok ) r_write_fsm = WRITE_HEAP_LOCK; 1827 else r_write_fsm = WRITE_WAIT; 1828 } 1829 break; 1830 } 1831 ///////////////////// 1832 case WRITE_HEAP_LOCK: 1833 { 1834 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ) 1835 { 1836 1837 #if DEBUG_MEMC_WRITE 1838 if( m_debug_write_fsm ) 1839 { 1840 std::cout << " <MEMC.WRITE_HEAP_LOCK> Get acces to the HEAP" << std::endl; 1841 } 1842 #endif 1843 r_write_fsm = WRITE_UPT_REQ; 1844 } 1845 break; 1846 } 1847 ////////////////// 1848 case WRITE_UPT_REQ: 1849 { 1850 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_WRITE ) 1851 { 1852 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_REQ state" << std::endl; 1853 std::cout << "bad HEAP allocation" << std::endl; 1854 exit(0); 1855 } 1856 1857 if( !r_write_to_init_cmd_multi_req.read() && 1858 !r_write_to_init_cmd_brdcast_req.read() ) 1859 { 1860 r_write_to_init_cmd_brdcast_req = false; 1861 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1862 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1863 r_write_to_init_cmd_index = r_write_word_index.read(); 1864 r_write_to_init_cmd_count = r_write_word_count.read(); 1865 1866 for(size_t i=0; i<m_words ; i++) 1867 { 1868 r_write_to_init_cmd_be[i]=r_write_be[i].read(); 1869 } 1870 1871 size_t min = r_write_word_index.read(); 1872 size_t max = r_write_word_index.read() + r_write_word_count.read(); 1873 for (size_t i=min ; i<max ; i++) r_write_to_init_cmd_data[i] = r_write_data[i]; 1874 1875 if( (r_write_copy.read() != r_write_srcid.read()) or 1876 #if L1_MULTI_CACHE 1877 (r_write_copy_cache.read() != r_write_pktid.read()) or 1878 #endif 1879 r_write_copy_inst.read() ) 1880 { 1881 // We put the first copy in the fifo 1882 write_to_init_cmd_fifo_put = true; 1883 write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); 1884 write_to_init_cmd_fifo_srcid = r_write_copy.read(); 1885 #if L1_MULTI_CACHE 1886 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 1887 #endif 1888 if(r_write_count.read() == 1) 1889 { 1890 r_write_fsm = WRITE_IDLE; 1891 r_write_to_init_cmd_multi_req = true; 1892 } 1893 else 1894 { 1895 r_write_fsm = WRITE_UPDATE; 1896 } 1897 } 1898 else 1899 { 1900 r_write_fsm = WRITE_UPDATE; 1901 } 1902 } 1903 break; 1904 }
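Editor's note: WRITE_UPDATE below walks the HEAP's linked list of copies, one copy per cycle. A compact software model of that walk (hypothetical names; the real FSM also stalls when the INIT_CMD fifo is full):

    #include <cstddef>
    #include <vector>

    struct heap_entry_t { size_t srcid; bool inst; size_t next; };

    // Post one update per copy, skip the writer itself (its UPT counter is
    // decremented instead), and stop at the entry whose next pointer is itself.
    template <typename PostFn>
    void send_updates( const std::vector<heap_entry_t>& heap, size_t head,
                       size_t writer_srcid, PostFn post_update )
    {
        for ( size_t ptr = head ; ; ptr = heap[ptr].next )
        {
            const heap_entry_t& e = heap[ptr];
            if ( e.inst || (e.srcid != writer_srcid) ) post_update( e.srcid, e.inst );
            if ( e.next == ptr ) break;    // last copy: the entry points to itself
        }
    }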
1905 ////////////////// 1906 case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm 1907 { 1908 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_WRITE ) 1909 { 1910 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPDATE state" << std::endl; 1911 std::cout << "bad HEAP allocation" << std::endl; 1912 exit(0); 1913 } 1914 1915 HeapEntry entry = m_heap.read(r_write_ptr.read()); 1916 write_to_init_cmd_fifo_inst = entry.owner.inst; 1917 write_to_init_cmd_fifo_srcid = entry.owner.srcid; 1918 #if L1_MULTI_CACHE 1919 write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 1920 #endif 1921 1922 bool dec_upt_counter = r_write_to_dec.read(); 1923 if( (entry.owner.srcid != r_write_srcid.read()) or 1924 #if L1_MULTI_CACHE 1925 (entry.owner.cache_id != r_write_pktid.read()) or 1926 #endif 1927 entry.owner.inst) 1928 { 1929 write_to_init_cmd_fifo_put = true; 1930 } 1931 else 1932 { 1933 dec_upt_counter = true; 1934 } 1935 r_write_to_dec = dec_upt_counter; 1936 1937 if( m_write_to_init_cmd_inst_fifo.wok() ) 1938 { 1939 r_write_ptr = entry.next; 1940 if( entry.next == r_write_ptr.read() ) // last copy 1941 { 1942 r_write_to_init_cmd_multi_req = true; 1943 if(dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 1944 else r_write_fsm = WRITE_IDLE; 1945 } 1946 else 1947 { 1948 r_write_fsm = WRITE_UPDATE; 1949 } 1950 } 1951 else 1952 { 1953 r_write_fsm = WRITE_UPDATE; 1954 } 1955 break; 1956 } 1957 ////////////////// 1958 case WRITE_UPT_DEC: // ask the INIT_RSP FSM to decrement the UPT response counter 1959 { 1960 if ( !r_write_to_init_rsp_req.read() ) 1961 { 1962 r_write_to_init_rsp_req = true; 1963 r_write_to_init_rsp_upt_index = r_write_upt_index.read(); 1964 r_write_fsm = WRITE_IDLE; 1965 } 1966 break; 1967 } 1968 /////////////// 1969 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 1970 { 1971 if ( !r_write_to_tgt_rsp_req.read() ) 1972 { 1973 1974 #if DEBUG_MEMC_WRITE 1975 if( m_debug_write_fsm ) 1976 { 1977 std::cout << " <MEMC.WRITE_RSP> Post a request to TGT_RSP FSM: rsrcid = " 1978 << std::hex << r_write_srcid.read() << std:: endl; 1979 } 1980 #endif 1981 r_write_to_tgt_rsp_req = true; 1982 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 1983 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 1984 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 1985 r_write_fsm = WRITE_IDLE; 1986 } 1987 break; 1988 } 1989 //////////////////// 1990 case WRITE_TRT_LOCK: // Miss : check Transaction Table 1991 { 1992 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 1993 { 1994 1995 #if DEBUG_MEMC_WRITE 1996 if( m_debug_write_fsm ) 1997 { 1998 std::cout << " <MEMC.WRITE_TRT_LOCK> Check the TRT" << std::endl; 1999 } 2000 #endif 2001 size_t hit_index = 0; 2002 size_t wok_index = 0; 2003 vci_addr_t addr = (vci_addr_t)r_write_address.read(); 2004 bool hit_read = m_transaction_tab.hit_read(m_nline[addr], hit_index); 2005 bool hit_write = m_transaction_tab.hit_write(m_nline[addr]); 2006 bool wok = !m_transaction_tab.full(wok_index); 2007 2008 if ( hit_read ) // register the modified data in TRT 2009 { 2010 r_write_trt_index = hit_index; 2011 r_write_fsm = WRITE_TRT_DATA; 2012 m_cpt_write_miss++; 2013 } 2014 else if ( wok && !hit_write ) // set a new entry in TRT 2015 { 2016 r_write_trt_index = wok_index; 2017 r_write_fsm = WRITE_TRT_SET; 2018 m_cpt_write_miss++; 2019 } 2020 else // wait an empty entry in TRT 2021 { 2022 r_write_fsm = WRITE_WAIT; 2023 m_cpt_trt_full++; 2024 } 2025 } 2026 break; 2027 } 2028 //////////////// 2029 case WRITE_WAIT: // release the locks protecting the shared resources 2030 { 2031 2032 #if DEBUG_MEMC_WRITE 2033 if( m_debug_write_fsm ) 2034 { 2035 std::cout << " <MEMC.WRITE_WAIT> Releases 
the locks before retry" << std::endl; 2036 } 2037 #endif 2038 r_write_fsm = WRITE_DIR_LOCK; 2039 break; 2040 } 2041 /////////////////// 2042 case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) 2043 { 2044 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2045 { 2046 std::vector<be_t> be_vector; 2047 std::vector<data_t> data_vector; 2048 be_vector.clear(); 2049 data_vector.clear(); 2050 for ( size_t i=0; i<m_words; i++ ) 2051 { 2052 be_vector.push_back(r_write_be[i]); 2053 data_vector.push_back(r_write_data[i]); 2054 } 2055 m_transaction_tab.set(r_write_trt_index.read(), 2056 true, // read request to XRAM 2057 m_nline[(vci_addr_t)(r_write_address.read())], 2058 r_write_srcid.read(), 2059 r_write_trdid.read(), 2060 r_write_pktid.read(), 2061 false, // not a processor read 2062 0, // not a single word 2063 0, // word index 2064 be_vector, 2065 data_vector); 2066 r_write_fsm = WRITE_XRAM_REQ; 2067 2068 #if DEBUG_MEMC_WRITE 2069 if( m_debug_write_fsm ) 2070 { 2071 std::cout << " <MEMC.WRITE_TRT_SET> Set a new entry in TRT" << std::endl; 2072 } 2073 #endif 2074 } 2075 break; 2076 } 2077 //////////////////// 2078 case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) 2079 { 2080 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2081 { 2082 std::vector<be_t> be_vector; 2083 std::vector<data_t> data_vector; 2084 be_vector.clear(); 2085 data_vector.clear(); 2086 for ( size_t i=0; i<m_words; i++ ) 2087 { 2088 be_vector.push_back(r_write_be[i]); 2089 data_vector.push_back(r_write_data[i]); 2090 } 2091 m_transaction_tab.write_data_mask(r_write_trt_index.read(), 2092 be_vector, 2093 data_vector); 2094 r_write_fsm = WRITE_RSP; 2095 2096 #if DEBUG_MEMC_WRITE 2097 if( m_debug_write_fsm ) 2098 { 2099 std::cout << " <MEMC.WRITE_TRT_DATA> Modify an existing entry in TRT" << std::endl; 2100 m_transaction_tab.print( r_write_trt_index.read() ); 2101 } 2102 #endif 2103 } 2104 break; 2105 } 2106 //////////////////// 2107 case WRITE_XRAM_REQ: // send a request to IXR_CMD FSM 2108 { 2109 if ( !r_write_to_ixr_cmd_req ) 2110 { 2111 r_write_to_ixr_cmd_req = true; 2112 r_write_to_ixr_cmd_write = false; 2113 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2114 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2115 r_write_fsm = WRITE_RSP; 2116 2117 #if DEBUG_MEMC_WRITE 2118 if( m_debug_write_fsm ) 2119 { 2120 std::cout << " <MEMC.WRITE_XRAM_REQ> Post a request to the IXR_CMD FSM" << std::endl; 2121 } 2122 #endif 2123 } 2124 break; 2125 } 2126 ////////////////////////// 2127 case WRITE_TRT_WRITE_LOCK: // Check TRT not full 2128 { 2129 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 2130 { 2131 size_t wok_index = 0; 2132 bool wok = !m_transaction_tab.full( wok_index ); 2133 if ( wok ) // set a new entry in TRT 2134 { 2135 r_write_trt_index = wok_index; 2136 r_write_fsm = WRITE_INVAL_LOCK; 2137 } 2138 else // wait an empty entry in TRT 2139 { 2140 r_write_fsm = WRITE_WAIT; 2141 } 2142 2143 #if DEBUG_MEMC_WRITE 2144 if( m_debug_write_fsm ) 2145 { 2146 std::cout << " <MEMC.WRITE_TRT_WRITE_LOCK> Check TRT : wok = " 2147 << wok << " index = " << wok_index << std::endl; 2148 } 2149 #endif 2150 } 2151 break; 2152 } 2153 ////////////////////// 2154 case WRITE_INVAL_LOCK: 2155 { 2156 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) 2157 { 2158 bool wok = false; 2159 size_t index = 0; 2160 size_t srcid = r_write_srcid.read(); 2161 size_t trdid = r_write_trdid.read(); 2162 size_t pktid = r_write_pktid.read(); 2163 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2164 
size_t nb_copies = r_write_count.read(); 2165 2166 wok =m_update_tab.set(false, // it's an inval transaction 2167 true, // it's a broadcast 2168 true, // it needs a response 2169 srcid, 2170 trdid, 2171 pktid, 2172 nline, 2173 nb_copies, 2174 index); 2175 2176 #if DEBUG_MEMC_WRITE 2177 if( m_debug_write_fsm ) 2178 { 2179 if ( wok ) 2180 { 2181 std::cout << " <MEMC.WRITE_INVAL_LOCK> Register the broadcast inval in UPT / " 2182 << " nb_copies = " << r_write_count.read() << std::endl; 2183 //m_update_tab.print(); 2184 } 2185 } 2186 #endif 2187 r_write_upt_index = index; 2188 2189 if ( wok ) r_write_fsm = WRITE_DIR_INVAL; 2190 else r_write_fsm = WRITE_WAIT; 2191 } 2192 break; 1426 2193 } 1427 2194 ///////////////////// 1428 case WRITE_UPT_LOCK: // Try to register the request in Update Table 1429 { 1430 1431 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { 1432 bool wok = false; 1433 size_t index = 0; 1434 size_t srcid = r_write_srcid.read(); 1435 size_t trdid = r_write_trdid.read(); 1436 size_t pktid = r_write_pktid.read(); 1437 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1438 size_t nb_copies = r_write_count.read(); 1439 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1440 size_t way = r_write_way.read(); 1441 1442 wok =m_update_tab.set(true, // it's an update transaction 1443 false, // it's not a broadcast 1444 true, // it needs a response 1445 srcid, 1446 trdid, 1447 pktid, 1448 nline, 1449 nb_copies, 1450 index); 1451 if(wok){ 1452 // write data in cache 1453 for(size_t i=0 ; i<m_words ; i++) { 1454 if ( r_write_be[i].read() ) { 1455 m_cache_data[way][set][i] = r_write_data[i].read(); 1456 } 1457 } // end for 1458 } 1459 #ifdef IDEBUG 1460 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1461 if(wok){ 1462 std::cout << sc_time_stamp() << " " << name() << " WRITE_UPT_LOCK update table : " << std::endl; 1463 m_update_tab.print(); 1464 } 1465 } 1466 #endif 1467 r_write_upt_index = index; 1468 // releases the lock protecting the Update Table and the Directory if no entry... 
1469 if ( wok ) r_write_fsm = WRITE_HEAP_LOCK; 1470 else r_write_fsm = WRITE_WAIT; 1471 } 1472 break; 1473 } 1474 ////////////////// 1475 case WRITE_HEAP_LOCK: 1476 { 1477 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE ){ 1478 r_write_fsm = WRITE_UPT_REQ; 1479 } 1480 break; 1481 } 1482 ////////////////// 1483 case WRITE_UPT_REQ: 1484 { 1485 ASSERT( (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 1486 ,"MemCache ERROR : bad HEAP allocation"); 1487 if( !r_write_to_init_cmd_multi_req.read() && 1488 !r_write_to_init_cmd_brdcast_req.read() ){ 1489 r_write_to_init_cmd_brdcast_req = false; 1490 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1491 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1492 r_write_to_init_cmd_index = r_write_word_index.read(); 1493 r_write_to_init_cmd_count = r_write_word_count.read(); 1494 1495 for(size_t i=0; i<m_words ; i++){ 1496 r_write_to_init_cmd_be[i]=r_write_be[i].read(); 1497 } 1498 1499 size_t min = r_write_word_index.read(); 1500 size_t max = r_write_word_index.read() + r_write_word_count.read(); 1501 for (size_t i=min ; i<max ; i++) { 1502 r_write_to_init_cmd_data[i] = r_write_data[i]; 1503 } 1504 1505 if((r_write_copy.read() != r_write_srcid.read()) or 1506 #if L1_MULTI_CACHE 1507 (r_write_copy_cache.read() != r_write_pktid.read()) or 1508 #endif 1509 r_write_copy_inst.read() ) { 1510 // We put the first copy in the fifo 1511 write_to_init_cmd_fifo_put = true; 1512 write_to_init_cmd_fifo_inst = r_write_copy_inst.read(); 1513 write_to_init_cmd_fifo_srcid = r_write_copy.read(); 1514 #if L1_MULTI_CACHE 1515 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 1516 #endif 1517 if(r_write_count.read() == 1){ 1518 r_write_fsm = WRITE_IDLE; 1519 r_write_to_init_cmd_multi_req = true; 1520 } else { 1521 r_write_fsm = WRITE_UPDATE; 1522 } 1523 } else { 1524 r_write_fsm = WRITE_UPDATE; 1525 } 1526 } 1527 break; 1528 } 1529 ////////////////// 1530 case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm 1531 { 1532 ASSERT( (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 1533 ,"MemCache ERROR : bad HEAP allocation"); 1534 HeapEntry entry = m_heap_directory.read(r_write_ptr.read()); 1535 write_to_init_cmd_fifo_inst = entry.owner.inst; 1536 write_to_init_cmd_fifo_srcid = entry.owner.srcid; 1537 #if L1_MULTI_CACHE 1538 write_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 1539 #endif 1540 1541 bool dec_upt_counter = r_write_to_dec.read(); 1542 if( (entry.owner.srcid != r_write_srcid.read()) or 1543 #if L1_MULTI_CACHE 1544 (entry.owner.cache_id != r_write_pktid.read()) or 1545 #endif 1546 entry.owner.inst){ 1547 write_to_init_cmd_fifo_put = true; 1548 } else { 1549 dec_upt_counter = true; 1550 } 1551 r_write_to_dec = dec_upt_counter; 1552 1553 if( m_write_to_init_cmd_inst_fifo.wok() ){ 1554 r_write_ptr = entry.next; 1555 if( entry.next == r_write_ptr.read() ) { // last copy 1556 r_write_to_init_cmd_multi_req = true; 1557 if(dec_upt_counter){ 1558 r_write_fsm = WRITE_UPT_DEC; 1559 } else { 1560 r_write_fsm = WRITE_IDLE; 1561 } 1562 } else { 1563 r_write_fsm = WRITE_UPDATE; 1564 } 1565 } else { 1566 r_write_fsm = WRITE_UPDATE; 1567 } 1568 break; 1569 } 1570 ////////////////// 1571 case WRITE_UPT_DEC: 1572 { 1573 if(!r_write_to_init_rsp_req.read()){ 1574 r_write_to_init_rsp_req = true; 1575 r_write_to_init_rsp_upt_index = r_write_upt_index.read(); 1576 r_write_fsm = WRITE_IDLE; 1577 } 1578 break; 1579 } 1580 /////////////// 1581 case WRITE_RSP: // send a request to TGT_RSP FSM to acknowledge the write 1582 { 1583 if ( 
!r_write_to_tgt_rsp_req.read() ) { 1584 1585 PRINTF(" * <MEM_CACHE.WRITE> Request from %d.%d (%d)\n",(uint32_t)r_write_srcid.read(), (uint32_t)r_write_trdid.read(), (uint32_t)r_write_pktid.read()); 1586 1587 r_write_to_tgt_rsp_req = true; 1588 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 1589 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 1590 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 1591 r_write_fsm = WRITE_IDLE; 1592 } 1593 break; 1594 } 1595 //////////////////// 1596 case WRITE_TRT_LOCK: // Miss : check Transaction Table 1597 { 1598 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { 1599 #ifdef TDEBUG 1600 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1601 std::cout << sc_time_stamp() << " " << name() << " READ_TRT_LOCK " << std::endl; 1602 } 1603 #endif 1604 size_t hit_index = 0; 1605 size_t wok_index = 0; 1606 bool hit_read = m_transaction_tab.hit_read(m_nline[(vci_addr_t)(r_write_address.read())],hit_index); 1607 bool hit_write = m_transaction_tab.hit_write(m_nline[(vci_addr_t)(r_write_address.read())]); 1608 bool wok = !m_transaction_tab.full(wok_index); 1609 if ( hit_read ) { // register the modified data in TRT 1610 r_write_trt_index = hit_index; 1611 r_write_fsm = WRITE_TRT_DATA; 1612 m_cpt_write_miss++; 1613 } else if ( wok && !hit_write ) { // set a new entry in TRT 1614 r_write_trt_index = wok_index; 1615 r_write_fsm = WRITE_TRT_SET; 1616 m_cpt_write_miss++; 1617 } else { // wait an empty entry in TRT 1618 r_write_fsm = WRITE_WAIT; 1619 m_cpt_trt_full++; 1620 } 1621 } 1622 break; 1623 } 1624 //////////////////// 1625 case WRITE_WAIT: // release the lock protecting TRT 1626 { 1627 r_write_fsm = WRITE_DIR_LOCK; 1628 break; 1629 } 1630 /////////////////// 1631 case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) 1632 { 1633 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) 1634 { 1635 std::vector<be_t> be_vector; 1636 std::vector<data_t> data_vector; 1637 be_vector.clear(); 1638 data_vector.clear(); 1639 for ( size_t i=0; i<m_words; i++ ) 1640 { 1641 be_vector.push_back(r_write_be[i]); 1642 data_vector.push_back(r_write_data[i]); 1643 } 2195 case WRITE_DIR_INVAL: // Register a put transaction to XRAM in TRT 2196 // and invalidate the line in directory 2197 { 2198 if ( (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE ) || 2199 (r_alloc_upt_fsm.read() != ALLOC_UPT_WRITE ) || 2200 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE ) ) 2201 { 2202 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_DIR_INVAL state" << std::endl; 2203 std::cout << "bad TRT, DIR, or UPT allocation" << std::endl; 2204 exit(0); 2205 } 2206 2207 // register a write request to XRAM in TRT 1644 2208 m_transaction_tab.set(r_write_trt_index.read(), 1645 true, // read request to XRAM 1646 m_nline[(vci_addr_t)(r_write_address.read())], 1647 r_write_srcid.read(), 1648 r_write_trdid.read(), 1649 r_write_pktid.read(), 1650 false, // not a processor read 1651 0, // not a single word 1652 0, // word index 1653 be_vector, 1654 data_vector); 1655 #ifdef TDEBUG 1656 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1657 std::cout << sc_time_stamp() << " " << name() << " WRITE_TRT_SET transaction table : " << std::endl; 1658 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1659 m_transaction_tab.print(i); 1660 } 1661 #endif 1662 1663 r_write_fsm = WRITE_XRAM_REQ; 1664 } 1665 break; 1666 } 1667 /////////////////// 1668 case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) 1669 { 1670 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { 1671 std::vector<be_t> be_vector; 1672 std::vector<data_t> data_vector; 
1673 be_vector.clear(); 1674 data_vector.clear(); 1675 for ( size_t i=0; i<m_words; i++ ) { 1676 be_vector.push_back(r_write_be[i]); 1677 data_vector.push_back(r_write_data[i]); 1678 } 1679 m_transaction_tab.write_data_mask(r_write_trt_index.read(), 1680 be_vector, 1681 data_vector); 1682 r_write_fsm = WRITE_RSP; 1683 #ifdef TDEBUG 1684 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1685 std::cout << sc_time_stamp() << " " << name() << " WRITE_TRT_DATA transaction table : " << std::endl; 1686 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1687 m_transaction_tab.print(i); 1688 } 1689 #endif 1690 1691 } 1692 break; 1693 } 1694 //////////////////// 1695 case WRITE_XRAM_REQ: // send a request to IXR_CMD FSM 1696 { 1697 1698 if ( !r_write_to_ixr_cmd_req ) { 1699 r_write_to_ixr_cmd_req = true; 1700 r_write_to_ixr_cmd_write = false; 1701 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1702 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 1703 r_write_fsm = WRITE_RSP; 1704 } 1705 break; 1706 } 1707 //////////////////// 1708 case WRITE_TRT_WRITE_LOCK: 1709 { 1710 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { 1711 size_t wok_index = 0; 1712 bool wok = !m_transaction_tab.full(wok_index); 1713 if ( wok ) { // set a new entry in TRT 1714 r_write_trt_index = wok_index; 1715 r_write_fsm = WRITE_INVAL_LOCK; 1716 } else { // wait an empty entry in TRT 1717 r_write_fsm = WRITE_WAIT; 1718 } 1719 } 1720 1721 break; 1722 } 1723 //////////////////// 1724 case WRITE_INVAL_LOCK: 1725 { 1726 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { 1727 bool wok = false; 1728 size_t index = 0; 1729 size_t srcid = r_write_srcid.read(); 1730 size_t trdid = r_write_trdid.read(); 1731 size_t pktid = r_write_pktid.read(); 1732 addr_t nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1733 size_t nb_copies = r_write_count.read(); 1734 1735 wok =m_update_tab.set(false, // it's an inval transaction 1736 true, // it's a broadcast 1737 true, // it needs a response 1738 srcid, 1739 trdid, 1740 pktid, 1741 nline, 1742 nb_copies, 1743 index); 1744 #ifdef IDEBUG 1745 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1746 if(wok){ 1747 std::cout << sc_time_stamp() << " " << name() << " WRITE_INVAL_LOCK update table : " << std::endl; 1748 m_update_tab.print(); 1749 } 1750 } 1751 #endif 1752 r_write_upt_index = index; 1753 // releases the lock protecting Update Table if no entry... 
1754 if ( wok ) r_write_fsm = WRITE_DIR_INVAL; 1755 else r_write_fsm = WRITE_WAIT; 1756 } 1757 1758 break; 1759 } 1760 //////////////////// 1761 case WRITE_DIR_INVAL: 1762 { 1763 ASSERT(((r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) && 1764 (r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) && 1765 (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE )) 1766 ,"MemCache ERROR : bad TRT,DIR or UPT allocation error"); 1767 m_transaction_tab.set(r_write_trt_index.read(), 1768 false, // write request to XRAM 1769 m_nline[(vci_addr_t)(r_write_address.read())], 1770 0, 1771 0, 1772 0, 1773 false, // not a processor read 1774 0, // not a single word 1775 0, // word index 1776 std::vector<be_t>(m_words,0), 1777 std::vector<data_t>(m_words,0)); 1778 #ifdef TDEBUG 1779 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1780 std::cout << sc_time_stamp() << " " << name() << " WRITE_DIR_INVAL transaction table : " << std::endl; 1781 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1782 m_transaction_tab.print(i); 1783 } 1784 #endif 1785 2209 false, // write request to XRAM 2210 m_nline[(vci_addr_t)(r_write_address.read())], 2211 0, 2212 0, 2213 0, 2214 false, // not a processor read 2215 0, // not a single word 2216 0, // word index 2217 std::vector<be_t>(m_words,0), 2218 std::vector<data_t>(m_words,0)); 1786 2219 // invalidate directory entry 1787 2220 DirectoryEntry entry; … … 1800 2233 size_t set = m_y[(vci_addr_t)(r_write_address.read())]; 1801 2234 size_t way = r_write_way.read(); 2235 1802 2236 m_cache_directory.write(set, way, entry); 1803 2237 2238 #if DEBUG_MEMC_WRITE 2239 if( m_debug_write_fsm ) 2240 { 2241 std::cout << " <MEMC.WRITE_DIR_INVAL> Invalidate the directory entry: @ = " 2242 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 2243 } 2244 #endif 1804 2245 r_write_fsm = WRITE_INVAL; 1805 2246 break; 1806 2247 } 1807 //////////////////// 1808 case WRITE_INVAL: 1809 { 1810 if ( !r_write_to_init_cmd_multi_req.read() && 1811 !r_write_to_init_cmd_brdcast_req.read() ) { 1812 r_write_to_init_cmd_multi_req = false; 1813 r_write_to_init_cmd_brdcast_req = true; 1814 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 1815 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1816 r_write_to_init_cmd_index = 0; 1817 r_write_to_init_cmd_count = 0; 1818 1819 for(size_t i=0; i<m_words ; i++){ 1820 r_write_to_init_cmd_be[i]=0; 1821 r_write_to_init_cmd_data[i] = 0; 1822 } 1823 r_write_fsm = WRITE_XRAM_SEND; 1824 // all inval responses 1825 } 1826 1827 break; 1828 } 1829 //////////////////// 1830 case WRITE_XRAM_SEND: 1831 { 1832 if ( !r_write_to_ixr_cmd_req ) { 1833 r_write_to_ixr_cmd_req = true; 1834 r_write_to_ixr_cmd_write = true; 1835 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 1836 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 1837 for(size_t i=0; i<m_words; i++){ 1838 r_write_to_ixr_cmd_data[i] = r_write_data[i]; 1839 } 1840 r_write_fsm = WRITE_IDLE; 1841 } 1842 break; 2248 ///////////////// 2249 case WRITE_INVAL: // Post a coherence broadcast request to INIT_CMD FSM 2250 { 2251 if ( !r_write_to_init_cmd_multi_req.read() && !r_write_to_init_cmd_brdcast_req.read() ) 2252 { 2253 r_write_to_init_cmd_multi_req = false; 2254 r_write_to_init_cmd_brdcast_req = true; 2255 r_write_to_init_cmd_trdid = r_write_upt_index.read(); 2256 r_write_to_init_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2257 r_write_to_init_cmd_index = 0; 2258 r_write_to_init_cmd_count = 0; 2259 2260 for(size_t i=0; i<m_words ; i++) 2261 { 2262 
r_write_to_init_cmd_be[i]=0; 2263 r_write_to_init_cmd_data[i] = 0; 2264 } 2265 r_write_fsm = WRITE_XRAM_SEND; 2266 2267 #if DEBUG_MEMC_WRITE 2268 if( m_debug_write_fsm ) 2269 { 2270 std::cout << " <MEMC.WRITE_INVAL> Post a broadcast request to INIT_CMD FSM" << std::endl; 2271 } 2272 #endif 2273 } 2274 break; 2275 } 2276 ///////////////////// 2277 case WRITE_XRAM_SEND: // Post a put request to IXR_CMD FSM 2278 { 2279 if ( !r_write_to_ixr_cmd_req ) 2280 { 2281 r_write_to_ixr_cmd_req = true; 2282 r_write_to_ixr_cmd_write = true; 2283 r_write_to_ixr_cmd_nline = m_nline[(vci_addr_t)(r_write_address.read())]; 2284 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 2285 2286 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 2287 2288 r_write_fsm = WRITE_IDLE; 2289 2290 #if DEBUG_MEMC_WRITE 2291 if( m_debug_write_fsm ) 2292 { 2293 std::cout << " <MEMC.WRITE_XRAM_SEND> Post a put request to IXR_CMD FSM" << std::endl; 2294 } 2295 #endif 2296 } 2297 break; 1843 2298 } 1844 2299 } // end switch r_write_fsm … … 1848 2303 /////////////////////////////////////////////////////////////////////// 1849 2304 // The IXR_CMD fsm controls the command packets to the XRAM : 1850 // - It sends a single cell VCI read to the XRAM in case of MISS request1851 // posted by the READ, WRITE or LLSC FSMs : the TRDID field contains2305 // - It sends a single cell VCI get request to the XRAM in case of MISS 2306 // posted by the READ, WRITE or SC FSMs : the TRDID field contains 1852 2307 // the Transaction Tab index. 1853 2308 // The VCI response is a multi-cell packet : the N cells contain 1854 2309 // the N data words. 1855 // - It sends a multi-cell VCI write when the XRAM_RSP FSM request1856 // to save a dirty line to the XRAM.2310 // - It sends a multi-cell VCI write when the XRAM_RSP FSM, WRITE FSM 2311 // or SC FSM request to save a dirty line to the XRAM. 1857 2312 // The VCI response is a single cell packet. 1858 // This FSM handles requests from the READ, WRITE, LLSC & XRAM_RSP FSMs2313 // This FSM handles requests from the READ, WRITE, SC & XRAM_RSP FSMs 1859 2314 // with a round-robin priority. 
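// (Editor's note: the short stand-alone sketch below is an illustration added for
// this review, not part of the component; all names in it are hypothetical. It
// condenses the rotating-priority policy that the four IXR_CMD_*_IDLE states
// implement: after serving a client the FSM parks in that client's IDLE state,
// so that client is tested last on the next arbitration round.)
//
// enum ixr_client_t { CLIENT_READ = 0, CLIENT_WRITE, CLIENT_SC, CLIENT_XRAM };
//
// int select_next_client(int last_served, const bool req[4])
// {
//     for (int i = 1; i <= 4; i++) {         // scan the other clients first;
//         int c = (last_served + i) % 4;     // the last served one comes last
//         if (req[c]) return c;              // first pending request wins
//     }
//     return -1;                             // no pending request: stay idle
// }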
1860 2315 //////////////////////////////////////////////////////////////////////// 1861 2316 1862 switch ( r_ixr_cmd_fsm.read() ) { 1863 //////////////////////// 1864 case IXR_CMD_READ_IDLE: 2317 switch ( r_ixr_cmd_fsm.read() ) 2318 { 2319 //////////////////////// 2320 case IXR_CMD_READ_IDLE: 1865 2321 if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 1866 else if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2322 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1867 2323 else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1868 2324 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 1869 2325 break; 1870 2326 //////////////////////// 1871 case IXR_CMD_WRITE_IDLE:1872 if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2327 case IXR_CMD_WRITE_IDLE: 2328 if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1873 2329 else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1874 2330 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; … … 1876 2332 break; 1877 2333 //////////////////////// 1878 case IXR_CMD_LLSC_IDLE:2334 case IXR_CMD_SC_IDLE: 1879 2335 if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1880 2336 else if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 1881 2337 else if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 1882 else if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2338 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1883 2339 break; 1884 2340 //////////////////////// 1885 case IXR_CMD_XRAM_IDLE:2341 case IXR_CMD_XRAM_IDLE: 1886 2342 if ( r_read_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_READ_NLINE; 1887 2343 else if ( r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE_NLINE; 1888 else if ( r_ llsc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_LLSC_NLINE;2344 else if ( r_sc_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_SC_NLINE; 1889 2345 else if ( r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM_DATA; 1890 2346 break; 1891 2347 ///////////////////////// 1892 case IXR_CMD_READ_NLINE: 1893 if ( p_vci_ixr.cmdack ) { 1894 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 1895 r_read_to_ixr_cmd_req = false; 2348 case IXR_CMD_READ_NLINE: 2349 if ( p_vci_ixr.cmdack ) 2350 { 2351 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 2352 r_read_to_ixr_cmd_req = false; 2353 2354 #if DEBUG_MEMC_IXR_CMD 2355 if( m_debug_ixr_cmd_fsm ) 2356 { 2357 std::cout << " <MEMC.IXR_CMD_READ_NLINE> Send a get request to xram" ; 2358 } 2359 #endif 1896 2360 } 1897 2361 break; 1898 2362 ////////////////////////// 1899 case IXR_CMD_WRITE_NLINE: 1900 if ( p_vci_ixr.cmdack ) { 1901 if( r_write_to_ixr_cmd_write.read()){ 1902 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) { 1903 r_ixr_cmd_cpt = 0; 1904 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 1905 r_write_to_ixr_cmd_req = false; 1906 } else { 1907 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 1908 } 1909 } else { 1910 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 1911 r_write_to_ixr_cmd_req = false; 1912 } 2363 case IXR_CMD_WRITE_NLINE: 2364 if ( p_vci_ixr.cmdack ) 2365 { 2366 if( r_write_to_ixr_cmd_write.read()) 2367 { 2368 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2369 { 2370 r_ixr_cmd_cpt = 0; 2371 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2372 r_write_to_ixr_cmd_req = false; 2373 } 2374 else 2375 { 2376 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 2377 } 2378 2379 #if DEBUG_MEMC_IXR_CMD 2380 if( m_debug_ixr_cmd_fsm ) 2381 { 2382 std::cout << " <MEMC.IXR_CMD_WRITE_NLINE> Send a put request to 
xram" ; 2383 } 2384 #endif 2385 } 2386 else 2387 { 2388 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 2389 r_write_to_ixr_cmd_req = false; 2390 2391 #if DEBUG_MEMC_IXR_CMD 2392 if( m_debug_ixr_cmd_fsm ) 2393 { 2394 std::cout << " <MEMC.IXR_CMD_WRITE_NLINE> Send a get request to xram" ; 2395 } 2396 #endif 2397 } 1913 2398 } 1914 2399 break; 1915 ///////////////////////// 1916 case IXR_CMD_LLSC_NLINE: 1917 if ( p_vci_ixr.cmdack ) { 1918 if( r_llsc_to_ixr_cmd_write.read()){ 1919 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) { 1920 r_ixr_cmd_cpt = 0; 1921 r_ixr_cmd_fsm = IXR_CMD_LLSC_IDLE; 1922 r_llsc_to_ixr_cmd_req = false; 1923 } else { 1924 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 1925 } 1926 } else { 1927 r_ixr_cmd_fsm = IXR_CMD_LLSC_IDLE; 1928 r_llsc_to_ixr_cmd_req = false; 1929 } 2400 ////////////////////// 2401 case IXR_CMD_SC_NLINE: 2402 if ( p_vci_ixr.cmdack ) 2403 { 2404 if( r_sc_to_ixr_cmd_write.read()) 2405 { 2406 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2407 { 2408 r_ixr_cmd_cpt = 0; 2409 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2410 r_sc_to_ixr_cmd_req = false; 2411 } 2412 else 2413 { 2414 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 2415 } 2416 2417 #if DEBUG_MEMC_IXR_CMD 2418 if( m_debug_ixr_cmd_fsm ) 2419 { 2420 std::cout << " <MEMC.IXR_CMD_SC_NLINE> Send a put request to xram" ; 2421 } 2422 #endif 2423 } 2424 else 2425 { 2426 r_ixr_cmd_fsm = IXR_CMD_SC_IDLE; 2427 r_sc_to_ixr_cmd_req = false; 2428 2429 #if DEBUG_MEMC_IXR_CMD 2430 if( m_debug_ixr_cmd_fsm ) 2431 { 2432 std::cout << " <MEMC.IXR_CMD_SC_NLINE> Send a get request to xram" ; 2433 } 2434 #endif 2435 } 1930 2436 } 1931 2437 break; 1932 2438 //////////////////////// 1933 case IXR_CMD_XRAM_DATA: 1934 if ( p_vci_ixr.cmdack ) { 1935 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) { 1936 r_ixr_cmd_cpt = 0; 1937 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 1938 r_xram_rsp_to_ixr_cmd_req = false; 1939 } else { 1940 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 1941 } 2439 case IXR_CMD_XRAM_DATA: 2440 if ( p_vci_ixr.cmdack ) 2441 { 2442 if ( r_ixr_cmd_cpt.read() == (m_words - 1) ) 2443 { 2444 r_ixr_cmd_cpt = 0; 2445 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 2446 r_xram_rsp_to_ixr_cmd_req = false; 2447 } 2448 else 2449 { 2450 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 1; 2451 } 2452 2453 #if DEBUG_MEMC_IXR_CMD 2454 if( m_debug_ixr_cmd_fsm ) 2455 { 2456 std::cout << " <MEMC.IXR_CMD_XRAM_DATA> Send a put request to xram" ; 2457 } 2458 #endif 1942 2459 } 1943 2460 break; … … 1949 2466 //////////////////////////////////////////////////////////////////////////// 1950 2467 // The IXR_RSP FSM receives the response packets from the XRAM, 1951 // for both write transaction, and readtransaction.2468 // for both put transaction, and get transaction. 1952 2469 // 1953 // - A response to a writerequest is a single-cell VCI packet.2470 // - A response to a put request is a single-cell VCI packet. 1954 2471 // The Transaction Tab index is contained in the RTRDID field. 1955 2472 // The FSM takes the lock protecting the TRT, and the corresponding 1956 2473 // entry is erased. 1957 2474 // 1958 // - A response to a readrequest is a multi-cell VCI packet.2475 // - A response to a get request is a multi-cell VCI packet. 1959 2476 // The Transaction Tab index is contained in the RTRDID field. 1960 2477 // The N cells contain the N words of the cache line in the RDATA field. 
… … 1964 2481 /////////////////////////////////////////////////////////////////////////////// 1965 2482 1966 switch ( r_ixr_rsp_fsm.read() ) { 1967 1968 /////////////////// 1969 case IXR_RSP_IDLE: // test if it's a read or a write transaction 1970 { 1971 if ( p_vci_ixr.rspval.read() ) { 1972 r_ixr_rsp_cpt = 0; 1973 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 1974 if ( p_vci_ixr.reop.read() && !(p_vci_ixr.rerror.read()&0x1)) 1975 r_ixr_rsp_fsm = IXR_RSP_ACK; 1976 else 1977 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 1978 } 1979 break; 2483 switch ( r_ixr_rsp_fsm.read() ) 2484 { 2485 /////////////////// 2486 case IXR_RSP_IDLE: // test if it's a get or a put transaction 2487 { 2488 if ( p_vci_ixr.rspval.read() ) 2489 { 2490 r_ixr_rsp_cpt = 0; 2491 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 2492 if ( p_vci_ixr.reop.read() && !(p_vci_ixr.rerror.read()&0x1)) // put transaction 2493 { 2494 r_ixr_rsp_fsm = IXR_RSP_ACK; 2495 2496 #if DEBUG_MEMC_IXR_RSP 2497 if( m_debug_ixr_rsp_fsm ) 2498 { 2499 std::cout << " <MEMC.IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 2500 } 2501 #endif 2502 } 2503 else // get transaction 2504 { 2505 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 2506 2507 #if DEBUG_MEMC_IXR_RSP 2508 if( m_debug_ixr_rsp_fsm ) 2509 { 2510 std::cout << " <MEMC.IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 2511 } 2512 #endif 2513 } 2514 } 2515 break; 1980 2516 } 1981 2517 //////////////////////// 1982 case IXR_RSP_ACK: // Acknowledge the vci response 1983 { 1984 if(p_vci_ixr.rspval.read()) 1985 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 2518 case IXR_RSP_ACK: // Acknowledge the VCI response 2519 { 2520 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 2521 2522 #if DEBUG_MEMC_IXR_RSP 2523 if( m_debug_ixr_rsp_fsm ) 2524 { 2525 std::cout << " <MEMC.IXR_RSP_ACK>" << std::endl; 2526 } 2527 #endif 1986 2528 break; 1987 2529 } 1988 2530 //////////////////////// 1989 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 1990 { 1991 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) { 1992 m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); 1993 r_ixr_rsp_fsm = IXR_RSP_IDLE; 1994 #ifdef TDEBUG 1995 if(m_cpt_cycles > DEBUG_START_CYCLE){ 1996 std::cout << sc_time_stamp() << " " << name() << " IXR_RSP_TRT_ERASE transaction table : " << std::endl; 1997 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 1998 m_transaction_tab.print(i); 1999 } 2000 #endif 2001 2002 } 2003 break; 2531 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 2532 { 2533 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) 2534 { 2535 m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); 2536 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2537 2538 #if DEBUG_MEMC_IXR_RSP 2539 if( m_debug_ixr_rsp_fsm ) 2540 { 2541 std::cout << " <MEMC.IXR_RSP_TRT_ERASE> Erase TRT entry " 2542 << r_ixr_rsp_trt_index.read() << std::endl; 2543 } 2544 #endif 2545 } 2546 break; 2004 2547 } 2005 2548 /////////////////////// 2006 case IXR_RSP_TRT_READ: // write data in the TRT 2007 { 2008 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) { 2009 bool eop = p_vci_ixr.reop.read(); 2010 data_t data = p_vci_ixr.rdata.read(); 2011 size_t index = r_ixr_rsp_trt_index.read(); 2012 ASSERT(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || 2013 p_vci_ixr.rerror.read()) 2014 ,"Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); 2015 m_transaction_tab.write_rsp(index, r_ixr_rsp_cpt.read(), data, p_vci_ixr.rerror.read()&0x1); 2016 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; 2017 if ( eop ) { 2018 #ifdef TDEBUG
2019 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2020 std::cout << sc_time_stamp() << " " << name() << " IXR_RSP_TRT_READ transaction table : " << std::endl; 2021 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2022 m_transaction_tab.print(i); 2023 } 2024 #endif 2025 2026 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 2027 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2028 } 2029 } 2030 break; 2549 case IXR_RSP_TRT_READ: // write data in the TRT 2550 { 2551 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) 2552 { 2553 size_t index = r_ixr_rsp_trt_index.read(); 2554 bool eop = p_vci_ixr.reop.read(); 2555 data_t data = p_vci_ixr.rdata.read(); 2556 bool error = ((p_vci_ixr.rerror.read() & 0x1) != 0); 2557 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-1))) || p_vci_ixr.rerror.read()) 2558 and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); 2559 m_transaction_tab.write_rsp(index, 2560 r_ixr_rsp_cpt.read(), 2561 data, 2562 error); 2563 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; 2564 if ( eop ) 2565 { 2566 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 2567 r_ixr_rsp_fsm = IXR_RSP_IDLE; 2568 } 2569 2570 #if DEBUG_MEMC_IXR_RSP 2571 if( m_debug_ixr_rsp_fsm ) 2572 { 2573 std::cout << " <MEMC.IXR_RSP_TRT_READ> Writing a word in TRT : " 2574 << " index = " << std::dec << index 2575 << " / word = " << r_ixr_rsp_cpt.read() 2576 << " / data = " << std::hex << data << std::endl; 2577 } 2578 #endif 2579 } 2580 break; 2031 2581 } 2032 2582 } // end switch r_ixr_rsp_fsm 2033 2034 2583 2035 2584 //////////////////////////////////////////////////////////////////////////// … … 2037 2586 //////////////////////////////////////////////////////////////////////////// 2038 2587 // The XRAM_RSP FSM handles the incoming cache lines from the XRAM. 2039 // The cache line has been written in the TRT buffer by the IXR_FSM. 2588 // The cache line has been written in the TRT by the IXR_RSP FSM. 2589 // As the IXR_RSP FSM and the XRAM_RSP FSM run in parallel, 2590 // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] 2591 // as there are entries in the TRT; they are handled with 2592 // a round-robin priority... 2040 2593 // 2041 2594 // When a response is available, the corresponding TRT entry 2042 // is copied in a local buffer to be written in the cache. 2043 // Then, the FSM releases the lock protecting the TRT, and takes the lock 2044 // protecting the cache directory. 2595 // must be copied in a local buffer to be written in the cache. 2596 // The FSM takes the lock protecting the TRT, and the lock protecting the DIR. 2045 2597 // It selects a cache slot and writes the line in the cache.
2046 2598 // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP … … 2053 2605 /////////////////////////////////////////////////////////////////////////////// 2054 2606 2055 switch ( r_xram_rsp_fsm.read() ) { 2056 2057 /////////////////// 2058 case XRAM_RSP_IDLE: // test if there is a response with a round robin priority 2059 { 2060 size_t ptr = r_xram_rsp_trt_index.read(); 2061 size_t lines = m_transaction_tab_lines; 2062 for(size_t i=0; i<lines; i++){ 2063 size_t index=(i+ptr+1)%lines; 2064 if(r_ixr_rsp_to_xram_rsp_rok[index]){ 2065 r_xram_rsp_trt_index=index; 2066 r_ixr_rsp_to_xram_rsp_rok[index]=false; 2067 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2068 break; 2069 #ifdef TDEBUG 2070 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2071 std::cout << "XRAM_RSP FSM in XRAM_RSP_IDLE state" << std::endl; 2072 } 2073 #endif 2074 } 2075 } 2076 break; 2607 switch ( r_xram_rsp_fsm.read() ) 2608 { 2609 /////////////////// 2610 case XRAM_RSP_IDLE: // scan the XRAM responses to get the TRT index (round robin) 2611 { 2612 size_t ptr = r_xram_rsp_trt_index.read(); 2613 size_t lines = m_transaction_tab_lines; 2614 for( size_t i=0 ; i<lines ; i++) 2615 { 2616 size_t index=(i+ptr+1)%lines; 2617 if ( r_ixr_rsp_to_xram_rsp_rok[index] ) 2618 { 2619 r_xram_rsp_trt_index = index; 2620 r_ixr_rsp_to_xram_rsp_rok[index] = false; 2621 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2622 2623 #if DEBUG_MEMC_XRAM_RSP 2624 if( m_debug_xram_rsp_fsm ) 2625 { 2626 std::cout << " <MEMC.XRAM_RSP_IDLE> Available cache line in TRT:" 2627 << " index = " << std::dec << index << std::endl; 2628 } 2629 #endif 2630 break; 2631 } 2632 } 2633 break; 2077 2634 } 2078 2635 /////////////////////// 2079 case XRAM_RSP_DIR_LOCK: // Take the lock on the directory 2080 { 2081 if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) { 2082 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 2083 #ifdef TDEBUG 2084 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2085 std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_LOCK state" << std::endl; 2636 case XRAM_RSP_DIR_LOCK: // Takes the lock on the directory 2637 { 2638 if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) 2639 { 2640 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 2641 2642 #if DEBUG_MEMC_XRAM_RSP 2643 if( m_debug_xram_rsp_fsm ) 2644 { 2645 std::cout << " <MEMC.XRAM_RSP_DIR_LOCK> Get access to directory" << std::endl; 2646 } 2647 #endif 2648 } 2649 break; 2650 } 2651 /////////////////////// 2652 case XRAM_RSP_TRT_COPY: // Takes the lock on TRT 2653 // Copy the TRT entry in a local buffer 2654 // and select a victim cache line 2655 { 2656 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 2657 { 2658 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 2659 size_t index = r_xram_rsp_trt_index.read(); 2660 TransactionTabEntry trt_entry(m_transaction_tab.read(index)); 2661 r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer 2662 2663 // selects & extracts a victim line from cache 2664 size_t way = 0; 2665 size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)]; 2666 DirectoryEntry victim(m_cache_directory.select(set, way)); 2667 2668 bool inval = (victim.count && victim.valid) ; 2669 2670 // copy the victim line in a local buffer 2671 for (size_t i=0 ; i<m_words ; i++) 2672 r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; 2673 r_xram_rsp_victim_copy = victim.owner.srcid; 2674 #if L1_MULTI_CACHE 2675 r_xram_rsp_victim_copy_cache= victim.owner.cache_id; 2676 #endif 2677 r_xram_rsp_victim_copy_inst = victim.owner.inst; 2678 r_xram_rsp_victim_count = victim.count; 2679 r_xram_rsp_victim_ptr = victim.ptr; 2680 
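// (Editor's note) The victim's complete state -- line data, owner, copy count,
// heap pointer, way/set/nline, counter-mode flag, and the derived inval/dirty
// flags -- is latched into the r_xram_rsp_victim_* registers here, so that the
// later XRAM_RSP_DIR_UPDT, XRAM_RSP_INVAL and XRAM_RSP_WRITE_DIRTY states can
// work on it without reading the directory again.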
r_xram_rsp_victim_way = way; 2681 r_xram_rsp_victim_set = set; 2682 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 2683 r_xram_rsp_victim_is_cnt = victim.is_cnt; 2684 r_xram_rsp_victim_inval = inval ; 2685 r_xram_rsp_victim_dirty = victim.dirty; 2686 2687 if(!trt_entry.rerror) r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 2688 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 2689 2690 #if DEBUG_MEMC_XRAM_RSP 2691 if( m_debug_xram_rsp_fsm ) 2692 { 2693 std::cout << " <MEMC.XRAM_RSP_TRT_COPY> Select a slot: " 2694 << " way = " << std::dec << way 2695 << " / set = " << set 2696 << " / inval_required = " << inval << std::endl; 2697 } 2698 #endif 2699 } 2700 break; 2701 } 2702 ///////////////////////// 2703 case XRAM_RSP_INVAL_LOCK: // check a possible pending inval 2704 { 2705 if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) 2706 { 2707 size_t index; 2708 if (m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index)) 2709 { 2710 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2711 2712 #if DEBUG_MEMC_XRAM_RSP 2713 if( m_debug_xram_rsp_fsm ) 2714 { 2715 std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get access to UPT," 2716 << " but an invalidation is already registered at this address" << std::endl; 2717 m_update_tab.print(); 2718 } 2719 #endif 2720 2721 } 2722 else if (m_update_tab.is_full() && r_xram_rsp_victim_inval.read()) 2723 { 2724 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2725 2726 #if DEBUG_MEMC_XRAM_RSP 2727 if( m_debug_xram_rsp_fsm ) 2728 { 2729 std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get access to UPT," 2730 << " but the table is full" << std::endl; 2731 m_update_tab.print(); 2732 } 2733 #endif 2734 } 2735 else 2736 { 2737 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 2738 2739 #if DEBUG_MEMC_XRAM_RSP 2740 if( m_debug_xram_rsp_fsm ) 2741 { 2742 std::cout << " <MEMC.XRAM_RSP_INVAL_LOCK> Get access to UPT" << std::endl; 2743 } 2744 #endif 2745 } 2746 } 2747 break; 2748 } 2749 ///////////////////////// 2750 case XRAM_RSP_INVAL_WAIT: // returns to DIR_LOCK to retry 2751 { 2752 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2753 break; 2754 } 2755 /////////////////////// 2756 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 2757 // and possibly sets an inval request in UPT 2758 { 2759 // signals generation 2760 bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; 2761 bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read; 2762 // update data 2763 size_t set = r_xram_rsp_victim_set.read(); 2764 size_t way = r_xram_rsp_victim_way.read(); 2765 for(size_t i=0; i<m_words ; i++) m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; 2766 // compute dirty 2767 bool dirty = false; 2768 for(size_t i=0; i<m_words;i++) dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); 2769 // update directory 2770 DirectoryEntry entry; 2771 entry.valid = true; 2772 entry.is_cnt = false; 2773 entry.lock = false; 2774 entry.dirty = dirty; 2775 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 2776 entry.ptr = 0; 2777 if(cached_read) 2778 { 2779 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 2780 #if L1_MULTI_CACHE 2781 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 2782 #endif 2783 entry.owner.inst = inst_read; 2784 entry.count = 1; 2785 } 2786 else 2787 { 2788 entry.owner.srcid = 0; 2789 #if L1_MULTI_CACHE 2790 entry.owner.cache_id = 0; 2791 #endif 2792 entry.owner.inst = 0; 2793 entry.count = 0; 2794 } 2795 m_cache_directory.write(set, way, entry); 2796 2797 if (r_xram_rsp_victim_inval.read()) 2798 { 2799 bool brdcast = r_xram_rsp_victim_is_cnt.read(); 2800 size_t index = 0; 2801
size_t count_copies = r_xram_rsp_victim_count.read(); 2802 2803 bool wok = m_update_tab.set( false, // it's an inval transaction 2804 brdcast, // set brdcast bit 2805 false, // it does not need a response 2806 0, // srcid 2807 0, // trdid 2808 0, // pktid 2809 r_xram_rsp_victim_nline.read(), 2810 count_copies, 2811 index); 2812 r_xram_rsp_upt_index = index; 2813 2814 if (!wok) 2815 { 2816 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_DIR_UPDT state" << std::endl; 2817 std::cout << "an update_tab entry was free but write is unsuccessful" << std::endl; 2818 exit(0); 2819 } 2820 } 2821 2822 #if DEBUG_MEMC_XRAM_RSP 2823 if( m_debug_xram_rsp_fsm ) 2824 { 2825 std::cout << " <MEMC.XRAM_RSP_DIR_UPDT> Directory update: " 2826 << " way = " << std::dec << way 2827 << " / set = " << set 2828 << " / count = " << entry.count 2829 << " / is_cnt = " << entry.is_cnt << std::endl; 2830 if (r_xram_rsp_victim_inval.read()) 2831 std::cout << " Invalidation request for victim line " 2832 << std::hex << r_xram_rsp_victim_nline.read() 2833 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 2834 } 2835 #endif 2836 2837 // If the victim is not dirty, we don't need another XRAM put transaction, 2838 // and we can erase the TRT entry 2839 if (!r_xram_rsp_victim_dirty.read()) m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 2840 2841 // Next state 2842 if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 2843 else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2844 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2845 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2846 break; 2847 } 2848 //////////////////////// 2849 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty 2850 { 2851 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) 2852 { 2853 m_transaction_tab.set( r_xram_rsp_trt_index.read(), 2854 false, // write to XRAM 2855 r_xram_rsp_victim_nline.read(), // line index 2856 0, 2857 0, 2858 0, 2859 false, 2860 0, 2861 0, 2862 std::vector<be_t>(m_words,0), 2863 std::vector<data_t>(m_words,0) ); 2864 2865 #if DEBUG_MEMC_XRAM_RSP 2866 if( m_debug_xram_rsp_fsm ) 2867 { 2868 std::cout << " <MEMC.XRAM_RSP_TRT_DIRTY> Set TRT entry for the put transaction:" 2869 << " dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 2870 } 2871 #endif 2872 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2873 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2874 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2875 } 2876 break; 2877 } 2878 ////////////////////// 2879 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 2880 { 2881 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 2882 { 2883 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 2884 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 2885 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 2886 for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 2887 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 2888 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 2889 r_xram_rsp_to_tgt_rsp_rerror = false; 2890 r_xram_rsp_to_tgt_rsp_req = true; 2891 2892 if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2893 else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2894 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2895 2896 2897 #if DEBUG_MEMC_XRAM_RSP 2898 if( m_debug_xram_rsp_fsm ) 2899 {
2900 std::cout << " <MEMC.XRAM_RSP_DIR_RSP> Request the TGT_RSP FSM to return data:" 2901 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 2902 << " / address = " << r_xram_rsp_trt_buf.nline*m_words*4 2903 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 2904 } 2905 #endif 2906 } 2907 break; 2908 } 2909 //////////////////// 2910 case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM 2911 { 2912 if( !r_xram_rsp_to_init_cmd_multi_req.read() && 2913 !r_xram_rsp_to_init_cmd_brdcast_req.read() ) 2914 { 2915 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 2916 bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); 2917 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2918 2919 r_xram_rsp_to_init_cmd_multi_req = last_multi_req; 2920 r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 2921 r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); 2922 r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; 2923 xram_rsp_to_init_cmd_fifo_srcid = r_xram_rsp_victim_copy.read(); 2924 xram_rsp_to_init_cmd_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 2925 #if L1_MULTI_CACHE 2926 xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read(); 2927 #endif 2928 xram_rsp_to_init_cmd_fifo_put = multi_req; 2929 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 2930 2931 if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2932 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2933 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2934 2935 #if DEBUG_MEMC_XRAM_RSP 2936 if( m_debug_xram_rsp_fsm ) 2937 { 2938 std::cout << " <MEMC.XRAM_RSP_INVAL> Send an inval request to INIT_CMD FSM:" 2939 << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 2086 2940 } 2087 2941 #endif … … 2089 2943 break; 2090 2944 } 2091 /////////////////////// 2092 case XRAM_RSP_TRT_COPY: // Copy the TRT entry in the local buffer and eviction of a cache line 2093 { 2094 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) { 2095 size_t index = r_xram_rsp_trt_index.read(); 2096 TransactionTabEntry trt_entry(m_transaction_tab.read(index)); 2097 2098 r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer 2099 2100 // selects & extracts a victim line from cache 2101 size_t way = 0; 2102 size_t set = m_y[(vci_addr_t)(trt_entry.nline * m_words * 4)]; 2103 DirectoryEntry victim(m_cache_directory.select(set, way)); 2104 2105 for (size_t i=0 ; i<m_words ; i++) r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; 2106 2107 bool inval = (victim.count && victim.valid) ; 2108 2109 r_xram_rsp_victim_copy = victim.owner.srcid; 2945 ////////////////////////// 2946 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 2947 { 2948 if ( !r_xram_rsp_to_ixr_cmd_req.read() ) 2949 { 2950 r_xram_rsp_to_ixr_cmd_req = true; 2951 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 2952 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 2953 for(size_t i=0; i<m_words ; i++) r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 2954 m_cpt_write_dirty++; 2955 2956 bool multi_req = !r_xram_rsp_victim_is_cnt.read() && r_xram_rsp_victim_inval.read(); 2957 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2958 if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2959 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2960 2961 #if DEBUG_MEMC_XRAM_RSP 2962 if( m_debug_xram_rsp_fsm ) 2963 { 2964 std::cout << " <MEMC.XRAM_RSP_WRITE_DIRTY> Send 
the put request to IXR_CMD FSM:" 2965 << " victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 2966 } 2967 #endif 2968 } 2969 break; 2970 } 2971 ///////////////////////// 2972 case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations 2973 { 2974 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) 2975 { 2976 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 2977 2978 xram_rsp_to_init_cmd_fifo_srcid = entry.owner.srcid; 2110 2979 #if L1_MULTI_CACHE 2111 r_xram_rsp_victim_copy_cache= victim.owner.cache_id; 2112 #endif 2113 r_xram_rsp_victim_copy_inst = victim.owner.inst; 2114 r_xram_rsp_victim_count = victim.count; 2115 r_xram_rsp_victim_ptr = victim.ptr; 2116 r_xram_rsp_victim_way = way; 2117 r_xram_rsp_victim_set = set; 2118 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 2119 r_xram_rsp_victim_is_cnt = victim.is_cnt; 2120 r_xram_rsp_victim_inval = inval ; 2121 r_xram_rsp_victim_dirty = victim.dirty; 2122 2123 if(!trt_entry.rerror) 2124 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 2125 else 2126 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 2127 #ifdef TDEBUG 2128 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2129 std::cout << "XRAM_RSP FSM in XRAM_RSP_TRT_COPY state" << std::endl; 2130 std::cout << "Victim way : " << std::hex << way << " set " << set << std::dec << std::endl; 2131 victim.print(); 2132 } 2133 #endif 2134 } 2135 break; 2136 } 2137 /////////////////////// 2138 case XRAM_RSP_INVAL_LOCK: 2139 { 2140 if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) { 2141 #ifdef IDEBUG 2142 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2143 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state" << std::endl; 2144 } 2145 #endif 2146 size_t index; 2147 if(m_update_tab.search_inval(r_xram_rsp_trt_buf.nline, index)){ 2148 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2149 #ifdef IDEBUG 2150 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2151 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_INVAL_WAIT state" << std::endl; 2152 std::cout << "A invalidation is already registered at this address" << std::endl; 2153 m_update_tab.print(); 2154 } 2155 #endif 2156 2157 } 2158 else if(m_update_tab.is_full() && r_xram_rsp_victim_inval.read()){ 2159 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 2160 #ifdef IDEBUG 2161 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2162 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_INVAL_WAIT state" << std::endl; 2163 std::cout << "The inval tab is full" << std::endl; 2164 m_update_tab.print(); 2165 } 2166 #endif 2167 } 2168 else { 2169 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 2170 #ifdef IDEBUG 2171 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2172 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_LOCK state to XRAM_RSP_DIR_UPDT state" << std::endl; 2173 m_update_tab.print(); 2174 } 2175 #endif 2176 } 2177 } 2178 break; 2179 } 2180 /////////////////////// 2181 case XRAM_RSP_INVAL_WAIT: 2182 { 2183 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 2184 break; 2185 #ifdef IDEBUG 2186 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2187 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL_WAIT state" << std::endl; 2188 } 2189 #endif 2190 } 2191 /////////////////////// 2192 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 2193 { 2194 // signals generation 2195 bool inst_read = (r_xram_rsp_trt_buf.trdid & 0x2) && r_xram_rsp_trt_buf.proc_read; // It is an instruction read 2196 bool cached_read = (r_xram_rsp_trt_buf.trdid & 0x1) && r_xram_rsp_trt_buf.proc_read ; 2197 // update data 2198 size_t set = r_xram_rsp_victim_set.read(); 2199 size_t way = r_xram_rsp_victim_way.read(); 2200 for(size_t 
i=0; i<m_words ; i++){ 2201 m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; 2202 } 2203 // compute dirty 2204 bool dirty = false; 2205 for(size_t i=0; i<m_words;i++){ 2206 dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); 2207 } 2208 2209 // update directory 2210 DirectoryEntry entry; 2211 entry.valid = true; 2212 entry.is_cnt = false; 2213 entry.lock = false; 2214 entry.dirty = dirty; 2215 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 2216 entry.ptr = 0; 2217 if(cached_read) { 2218 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 2980 xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 2981 #endif 2982 xram_rsp_to_init_cmd_fifo_inst = entry.owner.inst; 2983 xram_rsp_to_init_cmd_fifo_put = true; 2984 if( m_xram_rsp_to_init_cmd_inst_fifo.wok() ) 2985 { 2986 r_xram_rsp_next_ptr = entry.next; 2987 if( entry.next == r_xram_rsp_next_ptr.read() ) // last copy 2988 { 2989 r_xram_rsp_to_init_cmd_multi_req = true; 2990 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 2991 } 2992 else 2993 { 2994 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2995 } 2996 } 2997 else 2998 { 2999 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 3000 } 3001 3002 #if DEBUG_MEMC_XRAM_RSP 3003 if( m_debug_xram_rsp_fsm ) 3004 { 3005 std::cout << " <MEMC.XRAM_RSP_HEAP_ERASE> Erase the list of copies:" 3006 << " srcid = " << entry.owner.srcid 3007 << " / inst = " << entry.owner.inst << std::endl; 3008 } 3009 #endif 3010 } 3011 break; 3012 } 3013 ///////////////////////// 3014 case XRAM_RSP_HEAP_LAST: // last member of the list 3015 { 3016 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP ) 3017 { 3018 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST state" << std::endl; 3019 std::cout << "bad HEAP allocation" << std::endl; 3020 exit(0); 3021 } 3022 size_t free_pointer = m_heap.next_free_ptr(); 3023 3024 HeapEntry last_entry; 3025 last_entry.owner.srcid = 0; 2219 3026 #if L1_MULTI_CACHE 2220 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 2221 #endif 2222 entry.owner.inst = inst_read; 2223 entry.count = 1; 2224 } else { 2225 entry.owner.srcid = 0; 2226 #if L1_MULTI_CACHE 2227 entry.owner.cache_id = 0; 2228 #endif 2229 entry.owner.inst = 0; 2230 entry.count = 0; 2231 } 2232 m_cache_directory.write(set, way, entry); 2233 #ifdef DDEBUG 2234 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2235 std::cout << "printing the entry : " << std::endl; 2236 entry.print(); 2237 std::cout << "done" << std::endl; 2238 } 2239 #endif 2240 2241 #ifdef TDEBUG 2242 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2243 std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_DIR_UPDT transaction table : " << std::endl; 2244 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2245 m_transaction_tab.print(i); 2246 } 2247 #endif 2248 2249 if(r_xram_rsp_victim_inval.read()){ 2250 bool brdcast = r_xram_rsp_victim_is_cnt.read(); 2251 size_t index = 0; 2252 size_t count_copies = r_xram_rsp_victim_count.read(); 2253 2254 //@@ 2255 bool wok = m_update_tab.set(false, // it's an inval transaction 2256 brdcast, // set brdcast bit 2257 false, // it does not need a response 2258 0,//srcid 2259 0,//trdid 2260 0,//pktid 2261 r_xram_rsp_victim_nline.read(), 2262 count_copies, 2263 index); 2264 2265 #ifdef IDEBUG 2266 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2267 std::cout << "xram_rsp : record invalidation, time = " << std::dec << m_cpt_cycles << std::endl; 2268 m_update_tab.print(); 2269 } 2270 #endif 2271 r_xram_rsp_upt_index = index; 2272 if(!wok) { 2273 ASSERT(false,"mem_cache error : xram_rsp_dir_upt, an update_tab entry was free but write unsuccessful"); 2274 } 
2275 } 2276 // If the victim is not dirty, we erase the entry in the TRT 2277 if (!r_xram_rsp_victim_dirty.read()){ 2278 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 2279 2280 } 2281 // Next state 2282 if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 2283 else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2284 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2285 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2286 break; 3027 last_entry.owner.cache_id = 0; 3028 #endif 3029 last_entry.owner.inst = false; 3030 if(m_heap.is_full()) 3031 { 3032 last_entry.next = r_xram_rsp_next_ptr.read(); 3033 m_heap.unset_full(); 3034 } 3035 else 3036 { 3037 last_entry.next = free_pointer; 3038 } 3039 3040 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 3041 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 3042 3043 r_xram_rsp_fsm = XRAM_RSP_IDLE; 3044 3045 #if DEBUG_MEMC_XRAM_RSP 3046 if( m_debug_xram_rsp_fsm ) 3047 { 3048 std::cout << " <MEMC.XRAM_RSP_HEAP_LAST> Heap housekeeping" << std::endl; 3049 } 3050 #endif 3051 break; 3052 } 3053 // /////////////////////// 3054 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 3055 { 3056 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 3057 3058 // Next state 3059 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 3060 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3061 3062 #if DEBUG_MEMC_XRAM_RSP 3063 if( m_debug_xram_rsp_fsm ) 3064 { 3065 std::cout << " <MEMC.XRAM_RSP_ERROR_ERASE> Error reported by XRAM / erase the TRT entry" << std::endl; 3066 } 3067 #endif 3068 break; 2287 3069 } 2288 3070 //////////////////////// 2289 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write line to XRAM) if the victim is dirty 2290 { 2291 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) { 2292 m_transaction_tab.set(r_xram_rsp_trt_index.read(), 2293 false, // write to XRAM 2294 r_xram_rsp_victim_nline.read(), // line index 2295 0, 2296 0, 2297 0, 2298 false, 2299 0, 2300 0, 2301 std::vector<be_t>(m_words,0), 2302 std::vector<data_t>(m_words,0) ); 2303 #ifdef TDEBUG 2304 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2305 std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_TRT_DIRTY transaction table : " << std::endl; 2306 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2307 m_transaction_tab.print(i); 2308 } 2309 #endif 2310 2311 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 2312 else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2313 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2314 } 2315 break; 2316 } 2317 ////////////////////// 2318 case XRAM_RSP_DIR_RSP: // send a request to TGT_RSP FSM in case of read 2319 { 2320 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) { 2321 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 2322 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 2323 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 2324 for (size_t i=0; i < m_words; i++) { 2325 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 2326 } 2327 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 2328 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 2329 r_xram_rsp_to_tgt_rsp_rerror = false; 2330 r_xram_rsp_to_tgt_rsp_req = true; 2331 2332 if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_INVAL; 2333 else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2334 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2335 2336 #ifdef DDEBUG 2337 if(m_cpt_cycles > 
DEBUG_START_CYCLE){ 2338 std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_RSP state" << std::endl; 2339 } 2340 #endif 2341 } 2342 break; 2343 } 2344 //////////////////// 2345 case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM 2346 { 2347 if( !r_xram_rsp_to_init_cmd_multi_req.read() && 2348 !r_xram_rsp_to_init_cmd_brdcast_req.read() ) { 2349 2350 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 2351 bool last_multi_req = multi_req && (r_xram_rsp_victim_count.read() == 1); 2352 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2353 2354 r_xram_rsp_to_init_cmd_multi_req = last_multi_req; 2355 r_xram_rsp_to_init_cmd_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 2356 r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); 2357 r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; 2358 xram_rsp_to_init_cmd_fifo_srcid = r_xram_rsp_victim_copy.read(); 2359 xram_rsp_to_init_cmd_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 2360 #if L1_MULTI_CACHE 2361 xram_rsp_to_init_cmd_fifo_cache_id = r_xram_rsp_victim_copy_cache.read(); 2362 #endif 2363 xram_rsp_to_init_cmd_fifo_put = multi_req; 2364 2365 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 2366 2367 if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 2368 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2369 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2370 #ifdef IDEBUG 2371 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2372 std::cout << "XRAM_RSP FSM in XRAM_RSP_INVAL state" << std::endl; 2373 } 2374 #endif 2375 } 2376 break; 2377 } 2378 ////////////////////////// 2379 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 2380 { 2381 if ( !r_xram_rsp_to_ixr_cmd_req.read() ) { 2382 r_xram_rsp_to_ixr_cmd_req = true; 2383 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 2384 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 2385 for(size_t i=0; i<m_words ; i++) { 2386 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 2387 } 2388 m_cpt_write_dirty++; 2389 bool multi_req = !r_xram_rsp_victim_is_cnt.read() && r_xram_rsp_victim_inval.read(); 2390 bool not_last_multi_req = multi_req && (r_xram_rsp_victim_count.read() != 1); 2391 if ( not_last_multi_req ) r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2392 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2393 #ifdef TDEBUG 2394 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2395 std::cout << "XRAM_RSP FSM in XRAM_RSP_WRITE_DIRTY state" << std::endl; 2396 } 2397 #endif 2398 } 2399 break; 2400 } 2401 ////////////////////////// 2402 case XRAM_RSP_HEAP_ERASE: // erase the list of copies and sent invalidations 2403 { 2404 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP ) { 2405 HeapEntry entry = m_heap_directory.read(r_xram_rsp_next_ptr.read()); 2406 xram_rsp_to_init_cmd_fifo_srcid = entry.owner.srcid; 2407 #if L1_MULTI_CACHE 2408 xram_rsp_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 2409 #endif 2410 xram_rsp_to_init_cmd_fifo_inst = entry.owner.inst; 2411 xram_rsp_to_init_cmd_fifo_put = true; 2412 if( m_xram_rsp_to_init_cmd_inst_fifo.wok() ){ 2413 r_xram_rsp_next_ptr = entry.next; 2414 if( entry.next == r_xram_rsp_next_ptr.read() ){ // last copy 2415 r_xram_rsp_to_init_cmd_multi_req = true; 2416 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 2417 } else { 2418 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2419 } 2420 } else { 2421 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 2422 } 2423 } 2424 break; 2425 } 2426 ////////////////////////// 2427 case XRAM_RSP_HEAP_LAST: // last member of the list 2428 { 2429 ASSERT((r_alloc_heap_fsm.read() == 
ALLOC_HEAP_XRAM_RSP) 2430 ,"MemCache ERROR : bad HEAP allocation"); 2431 size_t free_pointer = m_heap_directory.next_free_ptr(); 2432 2433 HeapEntry last_entry; 2434 last_entry.owner.srcid = 0; 2435 #if L1_MULTI_CACHE 2436 last_entry.owner.cache_id = 0; 2437 #endif 2438 last_entry.owner.inst = false; 2439 if(m_heap_directory.is_full()){ 2440 last_entry.next = r_xram_rsp_next_ptr.read(); 2441 m_heap_directory.unset_full(); 2442 } else { 2443 last_entry.next = free_pointer; 2444 } 2445 2446 m_heap_directory.write_free_ptr(r_xram_rsp_victim_ptr.read()); 2447 m_heap_directory.write(r_xram_rsp_next_ptr.read(),last_entry); 2448 2449 r_xram_rsp_fsm = XRAM_RSP_IDLE; 2450 2451 break; 2452 } 2453 /////////////////////// 2454 case XRAM_RSP_ERROR_ERASE: // erase xram transaction 2455 { 2456 2457 #ifdef TDEBUG 2458 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2459 std::cout << sc_time_stamp() << " " << name() << " XRAM_RSP_ERROR_ERASE transaction table : " << std::endl; 2460 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 2461 m_transaction_tab.print(i); 2462 } 2463 #endif 2464 2465 m_transaction_tab.erase(r_xram_rsp_trt_index.read()); 2466 2467 // Next state 2468 if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 2469 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 2470 break; 2471 } 2472 ////////////////////// 2473 case XRAM_RSP_ERROR_RSP: // send a request to TGT_RSP FSM in case of read 2474 { 2475 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) { 2476 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 2477 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 2478 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 2479 for (size_t i=0; i < m_words; i++) { 2480 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 2481 } 2482 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 2483 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 2484 r_xram_rsp_to_tgt_rsp_rerror = true; 2485 r_xram_rsp_to_tgt_rsp_req = true; 2486 2487 r_xram_rsp_fsm = XRAM_RSP_IDLE; 2488 2489 #ifdef DDEBUG 2490 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2491 std::cout << "XRAM_RSP FSM in XRAM_RSP_DIR_RSP state" << std::endl; 2492 } 2493 #endif 2494 } 2495 break; 3071 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 3072 { 3073 if ( !r_xram_rsp_to_tgt_rsp_req.read() ) 3074 { 3075 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 3076 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 3077 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 3078 for (size_t i=0; i < m_words; i++) r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 3079 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 3080 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 3081 r_xram_rsp_to_tgt_rsp_rerror = true; 3082 r_xram_rsp_to_tgt_rsp_req = true; 3083 3084 r_xram_rsp_fsm = XRAM_RSP_IDLE; 3085 3086 #if DEBUG_MEMC_XRAM_RSP 3087 if( m_debug_xram_rsp_fsm ) 3088 { 3089 std::cout << " <MEMC.XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 3090 << " srcid = " << r_xram_rsp_trt_buf.srcid << std::endl; 3091 } 3092 #endif 3093 } 3094 break; 2496 3095 } 2497 3096 } // end swich r_xram_rsp_fsm … … 2501 3100 //////////////////////////////////////////////////////////////////////////////////// 2502 3101 // The CLEANUP FSM handles the cleanup request from L1 caches. 2503 // It accesses the cache directory to update the list of copies. 2504 // 3102 // It accesses the cache directory and the heap to update the list of copies. 
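// (Editor's note: the free-standing sketch below, with deliberately simplified
// hypothetical types, is added for this review to summarize the list surgery
// done by the CLEANUP_DIR_WRITE / CLEANUP_HEAP_* states: the first copy lives in
// the directory entry itself, and further copies are chained in the heap. Unlike
// the component, whose last heap cell points to itself, the sketch terminates
// the chain with a 'nil' sentinel.)
//
// struct Copy { unsigned srcid; bool inst; size_t next; };      // one heap cell
// struct Dir  { unsigned srcid; bool inst; size_t ptr; size_t count; };
//
// // Unlink the copy <srcid,inst>; return the heap index to free, or nil when no
// // heap cell is released. Assumes the copy is present: the FSM treats a miss in
// // both directory and heap as a fatal error (see CLEANUP_HEAP_LOCK).
// size_t cleanup_copy(Dir& d, Copy heap[], unsigned srcid, bool inst, size_t nil)
// {
//     d.count = d.count - 1;
//     if (d.srcid == srcid && d.inst == inst) {  // match in the directory slot
//         if (d.count == 0) return nil;          // it was the only copy
//         size_t head = d.ptr;                   // else promote first heap copy
//         d.srcid = heap[head].srcid;
//         d.inst  = heap[head].inst;
//         d.ptr   = heap[head].next;
//         return head;
//     }
//     size_t prev = nil;                         // otherwise walk the chain
//     size_t cur  = d.ptr;
//     while (heap[cur].srcid != srcid || heap[cur].inst != inst) {
//         prev = cur;
//         cur  = heap[cur].next;
//     }
//     if (prev == nil) d.ptr = heap[cur].next;   // match was the first heap copy
//     else heap[prev].next = heap[cur].next;     // match was deeper in the list
//     return cur;                                // heap cell to free
// }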
2505 3103 //////////////////////////////////////////////////////////////////////////////////// 2506 switch ( r_cleanup_fsm.read() ) { 2507 2508 /////////////////// 2509 case CLEANUP_IDLE: 2510 { 2511 if ( p_vci_tgt_cleanup.cmdval.read() ) { 2512 ASSERT((p_vci_tgt_cleanup.srcid.read() < m_initiators) 2513 ,"VCI_MEM_CACHE error in a cleanup request : received SRCID is larger than the number of initiators"); 2514 2515 bool reached = false; 2516 for ( size_t index = 0 ; index < ncseg && !reached ; index++ ){ 2517 if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ){ 2518 reached = true; 2519 } 2520 } 2521 // only write request to a mapped address that are not broadcast are handled 2522 if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) && 2523 ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && 2524 reached) 2525 { 2526 PRINTF(" * <MEM_CACHE.CLEANUP> Request from %d.%d at address %llx\n",(uint32_t)p_vci_tgt_cleanup.srcid.read(),(uint32_t)p_vci_tgt_cleanup.pktid.read(),(uint64_t)p_vci_tgt_cleanup.address.read()); 2527 2528 m_cpt_cleanup++; 2529 2530 r_cleanup_nline = (addr_t)(m_nline[(vci_addr_t)(p_vci_tgt_cleanup.address.read())]) ; 2531 r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read(); 2532 r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read(); 2533 r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read(); 2534 2535 r_cleanup_fsm = CLEANUP_DIR_LOCK; 2536 } 2537 } 2538 break; 3104 3105 3106 switch ( r_cleanup_fsm.read() ) 3107 { 3108 ////////////////// 3109 case CLEANUP_IDLE: 3110 { 3111 if ( p_vci_tgt_cleanup.cmdval.read() ) 3112 { 3113 if (p_vci_tgt_cleanup.srcid.read() >= m_initiators ) 3114 { 3115 std::cout << "VCI_MEM_CACHE ERROR " << name() 3116 << " CLEANUP_IDLE state" << std::endl; 3117 std::cout << "illegal srcid for cleanup request" << std::endl; 3118 exit(0); 3119 } 3120 3121 bool reached = false; 3122 for ( size_t index = 0 ; index < ncseg && !reached ; index++ ) 3123 { 3124 if ( m_cseg[index]->contains((addr_t)(p_vci_tgt_cleanup.address.read())) ) 3125 reached = true; 3126 } 3127 // only write request to a mapped address that are not broadcast are handled 3128 if ( (p_vci_tgt_cleanup.cmd.read() == vci_param::CMD_WRITE) && 3129 ((p_vci_tgt_cleanup.address.read() & 0x3) == 0) && reached) 3130 { 3131 addr_t line = (addr_t)(m_nline[(vci_addr_t)(p_vci_tgt_cleanup.address.read())]); 3132 3133 r_cleanup_nline = line; 3134 r_cleanup_srcid = p_vci_tgt_cleanup.srcid.read(); 3135 r_cleanup_trdid = p_vci_tgt_cleanup.trdid.read(); 3136 r_cleanup_pktid = p_vci_tgt_cleanup.pktid.read(); 3137 r_cleanup_fsm = CLEANUP_DIR_LOCK; 3138 3139 #if DEBUG_MEMC_CLEANUP 3140 if( m_debug_cleanup_fsm ) 3141 { 3142 std::cout << " <MEMC.CLEANUP_IDLE> Cleanup request:" << std::hex 3143 << " line = " << line * m_words * 4 3144 << " / owner_id = " << p_vci_tgt_cleanup.srcid.read() 3145 << " / owner_ins = " << (p_vci_tgt_cleanup.trdid.read()&0x1) 3146 << std::endl; 3147 } 3148 #endif 3149 m_cpt_cleanup++; 3150 } 3151 } 3152 break; 2539 3153 } 2540 3154 ////////////////////// 2541 case CLEANUP_DIR_LOCK: 2542 { 2543 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) { 2544 2545 // Read the directory 2546 size_t way = 0; 2547 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 2548 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 2549 #ifdef DDEBUG 2550 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2551 std::cout << "In CLEANUP_DIR_LOCK printing the entry of address is : " << std::hex << cleanup_address << std::endl; 2552 entry.print(); 2553 std::cout << "done" << std::endl; 
2554 } 2555 #endif 2556 r_cleanup_is_cnt = entry.is_cnt; 2557 r_cleanup_dirty = entry.dirty; 2558 r_cleanup_tag = entry.tag; 2559 r_cleanup_lock = entry.lock; 2560 r_cleanup_way = way; 2561 r_cleanup_copy = entry.owner.srcid; 3155 case CLEANUP_DIR_LOCK: // test directory status 3156 { 3157 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) 3158 { 3159 // Read the directory 3160 size_t way = 0; 3161 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 3162 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 3163 r_cleanup_is_cnt = entry.is_cnt; 3164 r_cleanup_dirty = entry.dirty; 3165 r_cleanup_tag = entry.tag; 3166 r_cleanup_lock = entry.lock; 3167 r_cleanup_way = way; 3168 r_cleanup_copy = entry.owner.srcid; 2562 3169 #if L1_MULTI_CACHE 2563 r_cleanup_copy_cache= entry.owner.cache_id; 2564 #endif 2565 r_cleanup_copy_inst = entry.owner.inst; 2566 r_cleanup_count = entry.count; 2567 r_cleanup_ptr = entry.ptr; 2568 2569 // In case of hit, the copy must be cleaned in the copies bit-vector 2570 if( entry.valid){ 2571 if ( (entry.count==1) || (entry.is_cnt) ) { // no access to the heap 2572 r_cleanup_fsm = CLEANUP_DIR_WRITE; 2573 } else { 2574 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 2575 } 2576 } else { 2577 r_cleanup_fsm = CLEANUP_UPT_LOCK; 2578 } 2579 } 2580 break; 3170 r_cleanup_copy_cache= entry.owner.cache_id; 3171 #endif 3172 r_cleanup_copy_inst = entry.owner.inst; 3173 r_cleanup_count = entry.count; 3174 r_cleanup_ptr = entry.ptr; 3175 3176 if( entry.valid) // hit : the copy must be cleared 3177 { 3178 if ( (entry.count==1) || (entry.is_cnt) ) // no access to the heap 3179 { 3180 r_cleanup_fsm = CLEANUP_DIR_WRITE; 3181 } 3182 else // access to the heap 3183 { 3184 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 3185 } 3186 } 3187 else // miss : we must check the update table 3188 { 3189 r_cleanup_fsm = CLEANUP_UPT_LOCK; 3190 } 3191 3192 #if DEBUG_MEMC_CLEANUP 3193 if( m_debug_cleanup_fsm ) 3194 { 3195 std::cout << " <MEMC.CLEANUP_DIR_LOCK> Test directory status: " << std::hex 3196 << " line = " << r_cleanup_nline.read() * m_words * 4 3197 << " / hit = " << entry.valid 3198 << " / dir_id = " << entry.owner.srcid 3199 << " / dir_ins = " << entry.owner.inst 3200 << " / search_id = " << r_cleanup_srcid.read() 3201 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) 3202 << " / count = " << entry.count 3203 << " / is_cnt = " << entry.is_cnt << std::endl; 3204 } 3205 #endif 3206 } 3207 break; 2581 3208 } 2582 3209 /////////////////////// 2583 case CLEANUP_DIR_WRITE: 2584 { 2585 ASSERT((r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) 2586 ,"MemCache ERROR : Bad DIR allocation"); 2587 size_t way = r_cleanup_way.read(); 2588 #define L2 soclib::common::uint32_log2 2589 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read() << (L2(m_words) +2))]; 2590 #undef L2 2591 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 2592 bool match_srcid = ((r_cleanup_copy.read() == r_cleanup_srcid.read()) 3210 case CLEANUP_DIR_WRITE: // update the directory entry without heap access 3211 { 3212 if ( r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP ) 3213 { 3214 std::cout << "VCI_MEM_CACHE ERROR " << name() 3215 << " CLEANUP_DIR_WRITE state" 3216 << " bad DIR allocation" << std::endl; 3217 exit(0); 3218 } 3219 3220 size_t way = r_cleanup_way.read(); 3221 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; 3222 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3223 bool match_srcid = ((r_cleanup_copy.read() == r_cleanup_srcid.read()) 2593 3224 #if L1_MULTI_CACHE 2594 3225 and 
(r_cleanup_copy_cache.read() == r_cleanup_pktid.read()) 2595 3226 #endif 2596 3227 ); 2597 bool match_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 2598 bool match = match_srcid && match_inst; 2599 2600 // update the cache directory (for the copies) 2601 DirectoryEntry entry; 2602 entry.valid = true; 2603 entry.is_cnt = r_cleanup_is_cnt.read(); 2604 entry.dirty = r_cleanup_dirty.read(); 2605 entry.tag = r_cleanup_tag.read(); 2606 entry.lock = r_cleanup_lock.read(); 2607 entry.ptr = r_cleanup_ptr.read(); 2608 if(r_cleanup_is_cnt.read()) { // Directory is a counter 2609 entry.count = r_cleanup_count.read() -1; 2610 entry.owner.srcid = 0; 3228 bool match_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 3229 bool match = match_srcid && match_inst; 3230 3231 // update the cache directory (for the copies) 3232 DirectoryEntry entry; 3233 entry.valid = true; 3234 entry.is_cnt = r_cleanup_is_cnt.read(); 3235 entry.dirty = r_cleanup_dirty.read(); 3236 entry.tag = r_cleanup_tag.read(); 3237 entry.lock = r_cleanup_lock.read(); 3238 entry.ptr = r_cleanup_ptr.read(); 3239 3240 if ( r_cleanup_is_cnt.read() ) // counter mode 3241 { 3242 entry.count = r_cleanup_count.read() -1; 3243 entry.owner.srcid = 0; 2611 3244 #if L1_MULTI_CACHE 2612 entry.owner.cache_id= 0; 2613 #endif 2614 entry.owner.inst = 0; 2615 // response to the cache 3245 entry.owner.cache_id= 0; 3246 #endif 3247 entry.owner.inst = 0; 3248 // response to the cache 3249 r_cleanup_fsm = CLEANUP_RSP; 3250 } 3251 else // linked_list mode 3252 { 3253 if ( match ) // hit 3254 { 3255 entry.count = 0; // no more copy 3256 entry.owner.srcid = 0; 3257 #if L1_MULTI_CACHE 3258 entry.owner.cache_id=0; 3259 #endif 3260 entry.owner.inst = 0; 3261 r_cleanup_fsm = CLEANUP_RSP; 3262 } 3263 else // miss 3264 { 3265 entry.count = r_cleanup_count.read(); 3266 entry.owner.srcid = r_cleanup_copy.read(); 3267 #if L1_MULTI_CACHE 3268 entry.owner.cache_id = r_cleanup_copy_cache.read(); 3269 #endif 3270 entry.owner.inst = r_cleanup_copy_inst.read(); 3271 r_cleanup_fsm = CLEANUP_UPT_LOCK; 3272 } 3273 } 3274 m_cache_directory.write(set, way, entry); 3275 3276 #if DEBUG_MEMC_CLEANUP 3277 if( m_debug_cleanup_fsm ) 3278 { 3279 std::cout << " <MEMC.CLEANUP_DIR_WRITE> Update directory:" << std::hex 3280 << " line = " << r_cleanup_nline.read() * m_words * 4 3281 << " / dir_id = " << entry.owner.srcid 3282 << " / dir_ins = " << entry.owner.inst 3283 << " / count = " << entry.count 3284 << " / is_cnt = " << entry.is_cnt << std::endl; 3285 } 3286 #endif 3287 3288 break; 3289 } 3290 /////////////////////// 3291 case CLEANUP_HEAP_LOCK: // two cases are handled in this state: 3292 // - the matching copy is directly in the directory 3293 // - the matching copy is the first copy in the heap 3294 { 3295 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP ) 3296 { 3297 size_t way = r_cleanup_way.read(); 3298 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read()*m_words*4)]; 3299 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 3300 bool last = (heap_entry.next == r_cleanup_ptr.read()); 3301 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3302 3303 // match_dir computation 3304 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 3305 bool match_dir_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 3306 bool match_dir = match_dir_srcid and match_dir_inst; 3307 #if L1_MULTI_CACHE 3308 match_dir = match_dir and (r_cleanup_copy_cache.read() == r_cleanup_pktid.read()); 3309 #endif 3310 3311 // match_heap computation 3312 bool match_heap_srcid = 
(heap_entry.owner.srcid == r_cleanup_srcid.read()); 3313 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); 3314 bool match_heap = match_heap_srcid and match_heap_inst; 3315 #if L1_MULTI_CACHE 3316 match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read()); 3317 #endif 3318 3319 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 3320 r_cleanup_prev_srcid = heap_entry.owner.srcid; 3321 #if L1_MULTI_CACHE 3322 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 3323 #endif 3324 r_cleanup_prev_inst = heap_entry.owner.inst; 3325 3326 if (match_dir) // the matching copy is registered in the directory 3327 { 3328 // the copy registered in the directory must be replaced 3329 // by the first copy registered in the heap 3330 // and the corresponding entry must be freed 3331 DirectoryEntry dir_entry; 3332 dir_entry.valid = true; 3333 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3334 dir_entry.dirty = r_cleanup_dirty.read(); 3335 dir_entry.tag = r_cleanup_tag.read(); 3336 dir_entry.lock = r_cleanup_lock.read(); 3337 dir_entry.ptr = heap_entry.next; 3338 dir_entry.count = r_cleanup_count.read()-1; 3339 dir_entry.owner.srcid = heap_entry.owner.srcid; 3340 #if L1_MULTI_CACHE 3341 dir_entry.owner.cache_id = heap_entry.owner.cache_id; 3342 #endif 3343 dir_entry.owner.inst = heap_entry.owner.inst; 3344 m_cache_directory.write(set,way,dir_entry); 3345 r_cleanup_next_ptr = r_cleanup_ptr.read(); 3346 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3347 } 3348 else if (match_heap) // the matching copy is the first copy in the heap 3349 { 3350 // The first copy in heap must be freed 3351 // and the copy registered in directory must point to the next copy in heap 3352 DirectoryEntry dir_entry; 3353 dir_entry.valid = true; 3354 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3355 dir_entry.dirty = r_cleanup_dirty.read(); 3356 dir_entry.tag = r_cleanup_tag.read(); 3357 dir_entry.lock = r_cleanup_lock.read(); 3358 dir_entry.ptr = heap_entry.next; 3359 dir_entry.count = r_cleanup_count.read()-1; 3360 dir_entry.owner.srcid = r_cleanup_copy.read(); 3361 #if L1_MULTI_CACHE 3362 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 3363 #endif 3364 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 3365 m_cache_directory.write(set,way,dir_entry); 3366 r_cleanup_next_ptr = r_cleanup_ptr.read(); 3367 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3368 } 3369 else if(!last) // The matching copy is in the heap, but is not the first copy 3370 { 3371 // The directory entry must be modified to decrement count 3372 DirectoryEntry dir_entry; 3373 dir_entry.valid = true; 3374 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 3375 dir_entry.dirty = r_cleanup_dirty.read(); 3376 dir_entry.tag = r_cleanup_tag.read(); 3377 dir_entry.lock = r_cleanup_lock.read(); 3378 dir_entry.ptr = r_cleanup_ptr.read(); 3379 dir_entry.count = r_cleanup_count.read()-1; 3380 dir_entry.owner.srcid = r_cleanup_copy.read(); 3381 #if L1_MULTI_CACHE 3382 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 3383 #endif 3384 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 3385 m_cache_directory.write(set,way,dir_entry); 3386 r_cleanup_next_ptr = heap_entry.next; 3387 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 3388 } 3389 else 3390 { 3391 std::cout << "VCI_MEM_CACHE ERROR " << name() 3392 << " CLEANUP_HEAP_LOCK state" 3393 << " hit but copy not found" << std::endl; 3394 exit(0); 3395 } 3396 3397 #if DEBUG_MEMC_CLEANUP 3398 if( m_debug_cleanup_fsm ) 3399 { 3400 std::cout << " <MEMC.CLEANUP_HEAP_LOCK> Checks matching:" 3401 << " line = " << r_cleanup_nline.read() * 
m_words * 4 3402 << " / dir_id = " << r_cleanup_copy.read() 3403 << " / dir_ins = " << r_cleanup_copy_inst.read() 3404 << " / heap_id = " << heap_entry.owner.srcid 3405 << " / heap_ins = " << heap_entry.owner.inst 3406 << " / search_id = " << r_cleanup_srcid.read() 3407 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl; 3408 } 3409 #endif 3410 } 3411 break; 3412 } 3413 ///////////////////////// 3414 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 3415 // is in the heap, but is not the first in the linked list 3416 { 3417 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 3418 { 3419 std::cout << "VCI_MEM_CACHE ERROR " << name() 3420 << " CLEANUP_HEAP_SEARCH state" 3421 << " bad HEAP allocation" << std::endl; 3422 exit(0); 3423 } 3424 3425 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 3426 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 3427 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 3428 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 3429 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); 3430 bool match_heap = match_heap_srcid && match_heap_inst; 3431 #if L1_MULTI_CACHE 3432 match_heap = match_heap and (heap_entry.owner.cache_id == r_cleanup_pktid.read()); 3433 #endif 3434 3435 #if DEBUG_MEMC_CLEANUP 3436 if( m_debug_cleanup_fsm ) 3437 { 3438 std::cout << " <MEMC.CLEANUP_HEAP_SEARCH> Checks matching:" 3439 << " line = " << r_cleanup_nline.read() * m_words * 4 3440 << " / heap_id = " << heap_entry.owner.srcid 3441 << " / heap_ins = " << heap_entry.owner.inst 3442 << " / search_id = " << r_cleanup_srcid.read() 3443 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) 3444 << " / last = " << last << std::endl; 3445 } 3446 #endif 3447 if(match_heap) // the matching copy must be removed 3448 { 3449 r_cleanup_ptr = heap_entry.next; // reuse resources 3450 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 3451 } 3452 else 3453 { 3454 if ( last ) 3455 { 3456 std::cout << "VCI_MEM_CACHE_ERROR " << name() 3457 << " CLEANUP_HEAP_SEARCH state" 3458 << " cleanup hit but copy not found" << std::endl; 3459 exit(0); 3460 } 3461 else // test the next in the linked list 3462 { 3463 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 3464 r_cleanup_prev_srcid = heap_entry.owner.srcid; 3465 #if L1_MULTI_CACHE 3466 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 3467 #endif 3468 r_cleanup_prev_inst = heap_entry.owner.inst; 3469 r_cleanup_next_ptr = heap_entry.next; 3470 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 3471 3472 #if DEBUG_MEMC_CLEANUP 3473 if( m_debug_cleanup_fsm ) 3474 { 3475 std::cout << " <MEMC.CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 3476 << " line = " << r_cleanup_nline.read() * m_words * 4 3477 << " / heap_id = " << heap_entry.owner.srcid 3478 << " / heap_ins = " << heap_entry.owner.inst 3479 << " / search_id = " << r_cleanup_srcid.read() 3480 << " / search_ins = " << (r_cleanup_trdid.read()&0x1) << std::endl; 3481 } 3482 #endif 3483 } 3484 } 3485 break; 3486 } 3487 //////////////////////// 3488 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 3489 { 3490 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 3491 { 3492 std::cout << "VCI_MEM_CACHE ERROR " << name() 3493 << " CLEANUP_HEAP_CLEAN state" 3494 << " bad HEAP allocation" << std::endl; 3495 exit(0); 3496 } 3497 3498 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 3499 HeapEntry heap_entry; 3500 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 3501 #if L1_MULTI_CACHE 3502
heap_entry.owner.cache_id = r_cleanup_prev_cache_id.read(); 3503 #endif 3504 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 3505 if(last) // this is the last entry of the list of copies 3506 { 3507 heap_entry.next = r_cleanup_prev_ptr.read(); 3508 } 3509 else // this is not the last entry 3510 { 3511 heap_entry.next = r_cleanup_ptr.read(); 3512 } 3513 m_heap.write(r_cleanup_prev_ptr.read(),heap_entry); 3514 r_cleanup_fsm = CLEANUP_HEAP_FREE; 3515 3516 #if DEBUG_MEMC_CLEANUP 3517 if( m_debug_cleanup_fsm ) 3518 { 3519 std::cout << " <MEMC.CLEANUP_HEAP_CLEAN> Remove the copy in the linked list" << std::endl; 3520 } 3521 #endif 3522 break; 3523 } 3524 /////////////////////// 3525 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 3526 // and becomes the head of the list of free entries 3527 { 3528 if ( r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP ) 3529 { 3530 std::cout << "VCI_MEM_CACHE ERROR " << name() << " CLEANUP_HEAP_FREE state" << std::endl; 3531 std::cout << "Bad HEAP allocation" << std::endl; 3532 exit(0); 3533 } 3534 3535 HeapEntry heap_entry; 3536 heap_entry.owner.srcid = 0; 3537 #if L1_MULTI_CACHE 3538 heap_entry.owner.cache_id = 0; 3539 #endif 3540 heap_entry.owner.inst = false; 3541 3542 if(m_heap.is_full()) heap_entry.next = r_cleanup_next_ptr.read(); 3543 else heap_entry.next = m_heap.next_free_ptr(); 3544 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 3545 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 3546 m_heap.unset_full(); 2616 3547 r_cleanup_fsm = CLEANUP_RSP; 2617 } 2618 else{ // Directory is a list 2619 if(match) { // hit 2620 entry.count = 0; // no more copy 2621 entry.owner.srcid = 0; 3548 3549 #if DEBUG_MEMC_CLEANUP 3550 if( m_debug_cleanup_fsm ) 3551 { 3552 std::cout << " <MEMC.CLEANUP_HEAP_FREE> Update the list of free entries" << std::endl; 3553 } 3554 #endif 3555 break; 3556 } 3557 ////////////////////// 3558 case CLEANUP_UPT_LOCK: 3559 { 3560 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_CLEANUP ) 3561 { 3562 size_t index = 0; 3563 bool hit_inval; 3564 hit_inval = m_update_tab.search_inval(r_cleanup_nline.read(),index); 3565 3566 if ( !hit_inval ) // no pending inval 3567 { 3568 3569 #if DEBUG_MEMC_CLEANUP 3570 if( m_debug_cleanup_fsm ) 3571 { 3572 std::cout << " <MEMC.CLEANUP_UPT_LOCK> Unexpected cleanup with no corresponding UPT entry:" 3573 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 3574 } 3575 #endif 3576 r_cleanup_fsm = CLEANUP_RSP; 3577 } 3578 else // pending inval 3579 { 3580 r_cleanup_write_srcid = m_update_tab.srcid(index); 3581 r_cleanup_write_trdid = m_update_tab.trdid(index); 3582 r_cleanup_write_pktid = m_update_tab.pktid(index); 3583 r_cleanup_need_rsp = m_update_tab.need_rsp(index); 3584 r_cleanup_fsm = CLEANUP_UPT_WRITE; 3585 } 3586 r_cleanup_index.write(index) ; 3587 } 3588 break; 3589 } 3590 /////////////////////// 3591 case CLEANUP_UPT_WRITE: // decrement response counter 3592 { 3593 size_t count = 0; 3594 m_update_tab.decrement(r_cleanup_index.read(), count); 3595 if ( count == 0 ) 3596 { 3597 m_update_tab.clear(r_cleanup_index.read()); 3598 3599 #if DEBUG_MEMC_CLEANUP 3600 if( m_debug_cleanup_fsm ) 3601 { 3602 std::cout << " <MEMC.CLEANUP_UPT_WRITE> Decrement response counter in UPT:" 3603 << " UPT_index = " << r_cleanup_index.read() 3604 << " rsp_count = " << count << std::endl; 3605 } 3606 #endif 3607 if( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP ; 3608 else r_cleanup_fsm = CLEANUP_RSP; 3609 } 3610 else 3611 { 3612 r_cleanup_fsm =
CLEANUP_RSP ; 3613 } 3614 break; 3615 } 3616 /////////////////////// 3617 case CLEANUP_WRITE_RSP: // Response to a previous write on the direct network 3618 { 3619 if( !r_cleanup_to_tgt_rsp_req.read() ) 3620 { 3621 r_cleanup_to_tgt_rsp_req = true; 3622 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 3623 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 3624 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 3625 r_cleanup_fsm = CLEANUP_RSP; 3626 3627 #if DEBUG_MEMC_CLEANUP 3628 if( m_debug_cleanup_fsm ) 3629 { 3630 std::cout << " <MEMC.CLEANUP_WRITE_RSP> Send a response to a cleanup request:" 3631 << " rsrcid = " << r_cleanup_write_srcid.read() 3632 << " / rtrdid = " << r_cleanup_write_trdid.read() << std::endl; 3633 } 3634 #endif 3635 } 3636 break; 3637 } 3638 ///////////////// 3639 case CLEANUP_RSP: // Response to a cleanup on the coherence network 3640 { 3641 if ( p_vci_tgt_cleanup.rspack.read() ) 3642 { 3643 r_cleanup_fsm = CLEANUP_IDLE; 3644 3645 #if DEBUG_MEMC_CLEANUP 3646 if( m_debug_cleanup_fsm ) 3647 { 3648 std::cout << " <MEMC.CLEANUP_RSP> Send the response to a cleanup request:" 3649 << " rsrcid = " << r_cleanup_write_srcid.read() 3650 << " / rtrdid = " << r_cleanup_write_trdid.read() << std::endl; 3651 } 3652 #endif 3653 } 3654 break; 3655 } 3656 } // end switch cleanup fsm 3657 3658 //////////////////////////////////////////////////////////////////////////////////// 3659 // SC FSM 3660 //////////////////////////////////////////////////////////////////////////////////// 3661 // The SC FSM handles the SC (Store Conditional) atomic commands, 3662 // that are handled as "compare-and-swap" instructions. 3663 // 3664 // This command contains two or four flits: 3665 // - In case of 32 bits atomic access, the first flit contains the value read 3666 // by a previous LL instruction, the second flit contains the value to be written. 3667 // - In case of 64 bits atomic access, the first 2 flits contain the value read 3668 // by a previous LL instruction, the next 2 flits contain the value to be written. 3669 // 3670 // The target address is cacheable. If it is replicated in other L1 caches 3671 // than the writer, a coherence operation is done. 3672 // 3673 // It accesses the directory to check hit / miss. 3674 // - In case of miss, the SC FSM must register a GET transaction in TRT. 3675 // If a read transaction to the XRAM for this line already exists, 3676 // or if the transaction table is full, it goes to the WAIT state 3677 // to release the locks and try again. When the GET transaction has been 3678 // launched, it goes to the WAIT state and tries again. 3679 // The SC request is not consumed in the FIFO until a HIT is obtained. 3680 // - In case of hit...
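//
// A minimal sketch of the compare-and-swap semantics implemented by the
// states below, for a 32 bits access (illustrative pseudo-code only:
// "flit0" / "flit1" name the two command flits and are not signals of
// this model):
//
//      bool sc_ok = ( m_cache_data[way][set][word] == flit0 );  // compare with the LL value
//      if ( sc_ok ) m_cache_data[way][set][word] = flit1;       // write only on match
//      rsp_data = sc_ok ? 0 : 1;                                // 0 = success / 1 = failure
//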
3681 /////////////////////////////////////////////////////////////////////////////////// 3682 3683 switch ( r_sc_fsm.read() ) 3684 { 3685 ///////////// 3686 case SC_IDLE: // fill the local rdata buffers 3687 { 3688 if( m_cmd_sc_addr_fifo.rok() ) 3689 { 3690 3691 #if DEBUG_MEMC_SC 3692 if( m_debug_sc_fsm ) 3693 { 3694 std::cout << " <MEMC.SC_IDLE> SC command: " << std::hex 3695 << " srcid = " << m_cmd_sc_srcid_fifo.read() 3696 << " addr = " << m_cmd_sc_addr_fifo.read() 3697 << " wdata = " << m_cmd_sc_wdata_fifo.read() 3698 << " eop = " << m_cmd_sc_eop_fifo.read() 3699 << " cpt = " << std::dec << r_sc_cpt.read() << std::endl; 3700 } 3701 #endif 3702 if( m_cmd_sc_eop_fifo.read() ) 3703 { 3704 m_cpt_sc++; 3705 r_sc_fsm = SC_DIR_LOCK; 3706 } 3707 else // we keep the last word in the FIFO 3708 { 3709 cmd_sc_fifo_get = true; 3710 } 3711 // We fill the two buffers 3712 if ( r_sc_cpt.read() < 2 ) // 32 bits access 3713 r_sc_rdata[r_sc_cpt.read()] = m_cmd_sc_wdata_fifo.read(); 3714 3715 if((r_sc_cpt.read() == 1) && m_cmd_sc_eop_fifo.read()) 3716 r_sc_wdata = m_cmd_sc_wdata_fifo.read(); 3717 3718 if( r_sc_cpt.read()>3 ) // more than 4 flits... 3719 { 3720 std::cout << "VCI_MEM_CACHE ERROR in SC_IDLE state : illegal SC command" 3721 << std::endl; 3722 exit(0); 3723 } 3724 3725 if ( r_sc_cpt.read()==2 ) 3726 r_sc_wdata = m_cmd_sc_wdata_fifo.read(); 3727 3728 r_sc_cpt = r_sc_cpt.read()+1; 3729 } 3730 break; 3731 } 3732 ///////////////// 3733 case SC_DIR_LOCK: // Read the directory 3734 { 3735 if( r_alloc_dir_fsm.read() == ALLOC_DIR_SC ) 3736 { 3737 size_t way = 0; 3738 DirectoryEntry entry(m_cache_directory.read(m_cmd_sc_addr_fifo.read(), way)); 3739 3740 r_sc_is_cnt = entry.is_cnt; 3741 r_sc_dirty = entry.dirty; 3742 r_sc_tag = entry.tag; 3743 r_sc_way = way; 3744 r_sc_copy = entry.owner.srcid; 2622 3745 #if L1_MULTI_CACHE 2623 entry.owner.cache_id=0; 2624 #endif 2625 entry.owner.inst = 0; 2626 r_cleanup_fsm = CLEANUP_RSP; 2627 } else { // miss 2628 entry.count = r_cleanup_count.read(); 2629 entry.owner.srcid = r_cleanup_copy.read(); 3746 r_sc_copy_cache = entry.owner.cache_id; 3747 #endif 3748 r_sc_copy_inst = entry.owner.inst; 3749 r_sc_ptr = entry.ptr; 3750 r_sc_count = entry.count; 3751 3752 if ( entry.valid ) r_sc_fsm = SC_DIR_HIT_READ; 3753 else r_sc_fsm = SC_TRT_GET_LOCK; 3754 3755 #if DEBUG_MEMC_SC 3756 if( m_debug_sc_fsm ) 3757 { 3758 std::cout << " <MEMC.SC_DIR_LOCK> Directory acces" 3759 << " / address = " << m_cmd_sc_addr_fifo.read() 3760 << " / hit = " << entry.valid 3761 << " / count = " << entry.count 3762 << " / is_cnt = " << entry.is_cnt << std::endl; 3763 } 3764 #endif 3765 } 3766 break; 3767 } 3768 ///////////////////// 3769 case SC_DIR_HIT_READ: // update directory for lock and dirty bit 3770 // and check data change in cache 3771 { 3772 size_t way = r_sc_way.read(); 3773 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3774 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3775 3776 // update directory (lock & dirty bits) 3777 DirectoryEntry entry; 3778 entry.valid = true; 3779 entry.is_cnt = r_sc_is_cnt.read(); 3780 entry.dirty = true; 3781 entry.lock = true; 3782 entry.tag = r_sc_tag.read(); 3783 entry.owner.srcid = r_sc_copy.read(); 2630 3784 #if L1_MULTI_CACHE 2631 entry.owner.cache_id = r_cleanup_copy_cache.read(); 2632 #endif 2633 entry.owner.inst = r_cleanup_copy_inst.read(); 2634 r_cleanup_fsm = CLEANUP_UPT_LOCK; 2635 } 2636 } 2637 m_cache_directory.write(set, way, entry); 2638 2639 break; 3785 entry.owner.cache_id = r_sc_copy_cache.read(); 3786 #endif 
3787 entry.owner.inst = r_sc_copy_inst.read(); 3788 entry.count = r_sc_count.read(); 3789 entry.ptr = r_sc_ptr.read(); 3790 3791 m_cache_directory.write(set, way, entry); 3792 3793 // read data in cache & check data change 3794 bool ok = ( r_sc_rdata[0].read() == m_cache_data[way][set][word] ); 3795 if ( r_sc_cpt.read()==4 ) // 64 bits SC 3796 ok &= ( r_sc_rdata[1] == m_cache_data[way][set][word+1] ); 3797 3798 // to avoid livelock, force the atomic access to fail pseudo-randomly 3799 bool forced_fail = ( (r_sc_lfsr % (64) == 0) && RANDOMIZE_SC ); 3800 r_sc_lfsr = (r_sc_lfsr >> 1) ^ ((-(r_sc_lfsr & 1)) & 0xd0000001); 3801 3802 if( ok and not forced_fail ) // no data change 3803 { 3804 r_sc_fsm = SC_DIR_HIT_WRITE; 3805 } 3806 else // return failure 3807 { 3808 r_sc_fsm = SC_RSP_FAIL; 3809 } 3810 3811 #if DEBUG_MEMC_SC 3812 if( m_debug_sc_fsm ) 3813 { 3814 std::cout << " <MEMC.SC_DIR_HIT_READ> Test if SC success:" 3815 << " / expected value = " << r_sc_rdata[0].read() 3816 << " / actual value = " << m_cache_data[way][set][word] 3817 << " / forced_fail = " << forced_fail << std::endl; 3818 } 3819 #endif 3820 break; 3821 } 3822 ///// //////////////// 3823 case SC_DIR_HIT_WRITE: // write data in the cache 3824 // and test if a coherence request is required 3825 { 3826 size_t way = r_sc_way.read(); 3827 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3828 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3829 3830 // cache update 3831 m_cache_data[way][set][word] = r_sc_wdata.read(); 3832 if(r_sc_cpt.read()==4) 3833 m_cache_data[way][set][word+1] = m_cmd_sc_wdata_fifo.read(); 3834 3835 // test coherence request 3836 if(r_sc_count.read()) // replicated line 3837 { 3838 if ( r_sc_is_cnt.read() ) 3839 { 3840 r_sc_fsm = SC_TRT_PUT_LOCK; // broadcast invalidate required 3841 } 3842 else if( !r_sc_to_init_cmd_multi_req.read() && 3843 !r_sc_to_init_cmd_brdcast_req.read() ) 3844 { 3845 r_sc_fsm = SC_UPT_LOCK; // multi update required 3846 } 3847 else 3848 { 3849 r_sc_fsm = SC_WAIT; 3850 } 3851 } 3852 else // no copies 3853 { 3854 r_sc_fsm = SC_RSP_SUCCESS; 3855 } 3856 3857 #if DEBUG_MEMC_SC 3858 if( m_debug_sc_fsm ) 3859 { 3860 std::cout << " <MEMC.SC_DIR_HIT_WRITE> Update cache:" 3861 << " way = " << std::dec << way 3862 << " / set = " << set 3863 << " / word = " << word 3864 << " / value = " << r_sc_wdata.read() 3865 << " / count = " << r_sc_count.read() << std::endl; 3866 } 3867 #endif 3868 break; 2640 3869 } 2641 3870 ///////////////// 2642 case CLEANUP_HEAP_LOCK: 2643 { 2644 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP){ 2645 size_t way = r_cleanup_way.read(); 2646 #define L2 soclib::common::uint32_log2 2647 size_t set = m_y[(vci_addr_t)(r_cleanup_nline.read() << (L2(m_words) +2))]; 2648 #undef L2 2649 HeapEntry heap_entry = m_heap_directory.read(r_cleanup_ptr.read()); 2650 bool last = (heap_entry.next == r_cleanup_ptr.read()); 2651 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 2652 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 3871 case SC_UPT_LOCK: // register the transaction in UPT 3872 { 3873 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) 3874 { 3875 bool wok = false; 3876 size_t index = 0; 3877 size_t srcid = m_cmd_sc_srcid_fifo.read(); 3878 size_t trdid = m_cmd_sc_trdid_fifo.read(); 3879 size_t pktid = m_cmd_sc_pktid_fifo.read(); 3880 addr_t nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3881 size_t nb_copies = r_sc_count.read(); 3882 3883 wok = m_update_tab.set(true, // it's an update transaction 3884 false, // it's not a 
broadcast 3885 true, // it needs a response 3886 srcid, 3887 trdid, 3888 pktid, 3889 nline, 3890 nb_copies, 3891 index); 3892 if (wok) // coherence transaction registered in UPT 3893 { 3894 r_sc_upt_index = index; 3895 r_sc_fsm = SC_HEAP_LOCK; 3896 } 3897 else // releases the locks protecting UPT and DIR if no entry 3898 { 3899 r_sc_fsm = SC_WAIT; 3900 } 3901 3902 #if DEBUG_MEMC_SC 3903 if( m_debug_sc_fsm ) 3904 { 3905 std::cout << " <MEMC.SC_UPT_LOCK> Register multi-update transaction in UPT" 3906 << " / wok = " << wok 3907 << " / nline = " << std::hex << nline 3908 << " / count = " << nb_copies << std::endl; 3909 } 3910 #endif 3911 } 3912 break; 3913 } 3914 ///////////// 3915 case SC_WAIT: // release all locks and retry from beginning 3916 { 3917 3918 #if DEBUG_MEMC_SC 3919 if( m_debug_sc_fsm ) 3920 { 3921 std::cout << " <MEMC.SC_WAIT> Release all locks" << std::endl; 3922 } 3923 #endif 3924 r_sc_fsm = SC_DIR_LOCK; 3925 break; 3926 } 3927 ////////////////// 3928 case SC_HEAP_LOCK: // lock the heap 3929 { 3930 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_SC ) 3931 { 3932 3933 #if DEBUG_MEMC_SC 3934 if( m_debug_sc_fsm ) 3935 { 3936 std::cout << " <MEMC.SC_HEAP_LOCK> Get access to the heap" << std::endl; 3937 } 3938 #endif 3939 r_sc_fsm = SC_UPT_REQ; 3940 } 3941 break; 3942 } 3943 //////////////// 3944 case SC_UPT_REQ: // send a first update request to INIT_CMD FSM 3945 { 3946 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) and 3947 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 3948 3949 if( !r_sc_to_init_cmd_multi_req.read() && !r_sc_to_init_cmd_brdcast_req.read() ) 3950 { 3951 r_sc_to_init_cmd_brdcast_req = false; 3952 r_sc_to_init_cmd_trdid = r_sc_upt_index.read(); 3953 r_sc_to_init_cmd_nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3954 r_sc_to_init_cmd_index = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 3955 r_sc_to_init_cmd_wdata = r_sc_wdata.read(); 3956 3957 if(r_sc_cpt.read() == 4) 3958 { 3959 r_sc_to_init_cmd_is_long = true; 3960 r_sc_to_init_cmd_wdata_high = m_cmd_sc_wdata_fifo.read(); 3961 } 3962 else 3963 { 3964 r_sc_to_init_cmd_is_long = false; 3965 r_sc_to_init_cmd_wdata_high = 0; 3966 } 3967 3968 // We put the first copy in the fifo 3969 sc_to_init_cmd_fifo_put = true; 3970 sc_to_init_cmd_fifo_inst = r_sc_copy_inst.read(); 3971 sc_to_init_cmd_fifo_srcid = r_sc_copy.read(); 2653 3972 #if L1_MULTI_CACHE 2654 bool match_dir_cache_id = (r_cleanup_copy_cache.read() == r_cleanup_pktid.read()); 2655 #endif 2656 bool match_dir_inst = (r_cleanup_copy_inst.read() == cleanup_inst); 2657 bool match_dir = match_dir_srcid and match_dir_cache_id and match_dir_inst; 2658 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 3973 sc_to_init_cmd_fifo_cache_id= r_sc_copy_cache.read(); 3974 #endif 3975 if(r_sc_count.read() == 1) // one single copy 3976 { 3977 r_sc_fsm = SC_IDLE; // Response will be sent after receiving 3978 // update responses 3979 cmd_sc_fifo_get = true; 3980 r_sc_to_init_cmd_multi_req = true; 3981 r_sc_cpt = 0; 3982 } 3983 else // several copies 3984 { 3985 r_sc_fsm = SC_UPT_NEXT; 3986 } 3987 3988 #if DEBUG_MEMC_SC 3989 if( m_debug_sc_fsm ) 3990 { 3991 std::cout << " <MEMC.SC_UPT_REQ> Send the first update request to INIT_CMD FSM " 3992 << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() 3993 << " / wdata = " << std::hex << r_sc_wdata.read() 3994 << " / srcid = " << std::hex << r_sc_copy.read() 3995 << " / inst = " << r_sc_copy_inst.read() << std::endl; 3996 } 3997 #endif 3998 } 3999 break; 4000 } 4001 ///////////////// 4002 case 
SC_UPT_NEXT: // send a multi-update request to INIT_CMD FSM 4003 { 4004 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) 4005 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 4006 4007 HeapEntry entry = m_heap.read(r_sc_ptr.read()); 4008 sc_to_init_cmd_fifo_srcid = entry.owner.srcid; 2659 4009 #if L1_MULTI_CACHE 2660 bool match_heap_cache_id= (heap_entry.owner.cache_id == r_cleanup_pktid.read()); 2661 #endif 2662 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); 2663 bool match_heap = match_heap_srcid and match_heap_cache_id and match_heap_inst; 2664 2665 PRINTF(" * <MEM_CACHE.CLEANUP> %s - srcid %d\n",name().c_str(),r_cleanup_srcid.read()); 2666 4010 sc_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 4011 #endif 4012 sc_to_init_cmd_fifo_inst = entry.owner.inst; 4013 sc_to_init_cmd_fifo_put = true; 4014 4015 if( m_sc_to_init_cmd_inst_fifo.wok() ) // request accepted by INIT_CMD FSM 4016 { 4017 r_sc_ptr = entry.next; 4018 if( entry.next == r_sc_ptr.read() ) // last copy 4019 { 4020 r_sc_to_init_cmd_multi_req = true; 4021 r_sc_fsm = SC_IDLE; // Response will be sent after receiving 4022 // all update responses 4023 cmd_sc_fifo_get = true; 4024 r_sc_cpt = 0; 4025 } 4026 } 4027 4028 #if DEBUG_MEMC_SC 4029 if( m_debug_sc_fsm ) 4030 { 4031 std::cout << " <MEMC.SC_UPT_NEXT> Send the next update request to INIT_CMD FSM " 4032 << " / address = " << std::hex << m_cmd_sc_addr_fifo.read() 4033 << " / wdata = " << std::hex << r_sc_wdata.read() 4034 << " / srcid = " << std::hex << entry.owner.srcid 4035 << " / inst = " << entry.owner.inst << std::endl; 4036 } 4037 #endif 4038 break; 4039 } 4040 ///////////////////// 4041 case SC_TRT_PUT_LOCK: // check the TRT to register a PUT transaction 4042 { 4043 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4044 { 4045 if( !r_sc_to_ixr_cmd_req ) // we can transfer the request to IXR_CMD FSM 4046 { 4047 // fill the data buffer 4048 size_t way = r_sc_way.read(); 4049 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4050 size_t word = m_x[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4051 for(size_t i = 0; i<m_words; i++) 4052 { 4053 if (i == word) 4054 { 4055 r_sc_to_ixr_cmd_data[i] = r_sc_wdata.read(); 4056 } 4057 else if ( (i == word+1) && (r_sc_cpt.read()==4) ) // 64 bit SC 4058 { 4059 r_sc_to_ixr_cmd_data[i] = m_cmd_sc_wdata_fifo.read(); 4060 } 4061 else 4062 { 4063 r_sc_to_ixr_cmd_data[i] = m_cache_data[way][set][i]; 4064 } 4065 } 4066 size_t wok_index = 0; 4067 bool wok = !m_transaction_tab.full(wok_index); 4068 if ( wok ) 4069 { 4070 r_sc_trt_index = wok_index; 4071 r_sc_fsm = SC_INVAL_LOCK; 4072 } 4073 else 4074 { 4075 r_sc_fsm = SC_WAIT; 4076 } 4077 } 4078 else 4079 { 4080 r_sc_fsm = SC_WAIT; 4081 } 4082 } 4083 break; 4084 } 4085 /////////////////// 4086 case SC_INVAL_LOCK: // Register a broadcast inval transaction in UPT 4087 { 4088 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) 4089 { 4090 bool wok = false; 4091 size_t index = 0; 4092 size_t srcid = m_cmd_sc_srcid_fifo.read(); 4093 size_t trdid = m_cmd_sc_trdid_fifo.read(); 4094 size_t pktid = m_cmd_sc_pktid_fifo.read(); 4095 addr_t nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4096 size_t nb_copies = r_sc_count.read(); 4097 4098 // register a broadcast inval transaction in UPT 4099 wok = m_update_tab.set(false, // it's an inval transaction 4100 true, // it's a broadcast 4101 true, // it needs a response 4102 srcid, 4103 trdid, 4104 pktid, 4105 nline, 4106 nb_copies, 4107 index); 4108 4109 if ( wok ) // UPT not full 4110 { 4111 r_sc_upt_index = index; 4112 r_sc_fsm = 
SC_DIR_INVAL; 4113 #if DEBUG_MEMC_SC 4114 if( m_debug_sc_fsm ) 4115 { 4116 std::cout << " <MEMC.SC_INVAL_LOCK> Register a broadcast inval transaction in UPT" 4117 << " / nline = " << nline 4118 << " / count = " << nb_copies 4119 << " / upt_index = " << index << std::endl; 4120 } 4121 #endif 4122 } 4123 else // releases the lock protecting UPT 4124 { 4125 r_sc_fsm = SC_WAIT; 4126 } 4127 } 4128 break; 4129 } 4130 ////////////////// 4131 case SC_DIR_INVAL: // Register the PUT transaction in TRT, and inval the DIR entry 4132 { 4133 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) && 4134 (r_alloc_upt_fsm.read() == ALLOC_UPT_SC ) && 4135 (r_alloc_dir_fsm.read() == ALLOC_DIR_SC )) 4136 { 4137 // set TRT 4138 m_transaction_tab.set(r_sc_trt_index.read(), 4139 false, // PUT request to XRAM 4140 m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())], 4141 0, 4142 0, 4143 0, 4144 false, // not a processor read 4145 0, 4146 0, 4147 std::vector<be_t>(m_words,0), 4148 std::vector<data_t>(m_words,0)); 4149 4150 // invalidate directory entry 4151 DirectoryEntry entry; 4152 entry.valid = false; 4153 entry.dirty = false; 4154 entry.tag = 0; 4155 entry.is_cnt = false; 4156 entry.lock = false; 4157 entry.count = 0; 4158 entry.owner.srcid = 0; 2667 4159 #if L1_MULTI_CACHE 2668 PRINTF(" * <MEM_CACHE.CLEANUP> match_dir %d (match_dir_srcid %d, match_dir_cache_id %d, match_dir_inst %d)\n", 2669 match_dir , match_dir_srcid , match_dir_cache_id , match_dir_inst); 2670 PRINTF(" * <MEM_CACHE.CLEANUP> match_heap %d (match_heap_srcid %d, match_heap_cache_id %d, match_heap_inst %d)\n", 2671 match_heap, match_heap_srcid, match_heap_cache_id, match_heap_inst); 2672 #else 2673 PRINTF(" * <MEM_CACHE.CLEANUP> match_dir %d (match_dir_srcid %d, match_dir_inst %d)\n", 2674 match_dir , match_dir_srcid , match_dir_inst); 2675 PRINTF(" * <MEM_CACHE.CLEANUP> match_heap %d (match_heap_srcid %d, match_heap_inst %d)\n", 2676 match_heap, match_heap_srcid, match_heap_inst); 2677 #endif 2678 PRINTF(" * <MEM_CACHE.CLEANUP> last %d\n",last); 2679 2680 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 2681 r_cleanup_prev_srcid = heap_entry.owner.srcid; 2682 #if L1_MULTI_CACHE 2683 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 2684 #endif 2685 2686 r_cleanup_prev_inst = heap_entry.owner.inst; 2687 2688 if(match_dir){ 2689 DirectoryEntry dir_entry; 2690 dir_entry.valid = true; 2691 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 2692 dir_entry.dirty = r_cleanup_dirty.read(); 2693 dir_entry.tag = r_cleanup_tag.read(); 2694 dir_entry.lock = r_cleanup_lock.read(); 2695 dir_entry.ptr = heap_entry.next; 2696 dir_entry.count = r_cleanup_count.read()-1; 2697 dir_entry.owner.srcid = heap_entry.owner.srcid; 2698 #if L1_MULTI_CACHE 2699 dir_entry.owner.cache_id = heap_entry.owner.cache_id; 2700 #endif 2701 dir_entry.owner.inst = heap_entry.owner.inst; 2702 m_cache_directory.write(set,way,dir_entry); 2703 r_cleanup_next_ptr = r_cleanup_ptr.read(); 2704 r_cleanup_fsm = CLEANUP_HEAP_FREE; 2705 } 2706 else if(match_heap){ 2707 DirectoryEntry dir_entry; 2708 dir_entry.valid = true; 2709 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 2710 dir_entry.dirty = r_cleanup_dirty.read(); 2711 dir_entry.tag = r_cleanup_tag.read(); 2712 dir_entry.lock = r_cleanup_lock.read(); 2713 dir_entry.ptr = heap_entry.next; 2714 dir_entry.count = r_cleanup_count.read()-1; 2715 dir_entry.owner.srcid = r_cleanup_copy.read(); 2716 #if L1_MULTI_CACHE 2717 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 2718 #endif 2719 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 2720 
m_cache_directory.write(set,way,dir_entry); 2721 r_cleanup_next_ptr = r_cleanup_ptr.read(); 2722 r_cleanup_fsm = CLEANUP_HEAP_FREE; 2723 } 2724 else{ 2725 if(!last){ 2726 DirectoryEntry dir_entry; 2727 dir_entry.valid = true; 2728 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 2729 dir_entry.dirty = r_cleanup_dirty.read(); 2730 dir_entry.tag = r_cleanup_tag.read(); 2731 dir_entry.lock = r_cleanup_lock.read(); 2732 dir_entry.ptr = r_cleanup_ptr.read(); 2733 dir_entry.count = r_cleanup_count.read()-1; 2734 dir_entry.owner.srcid = r_cleanup_copy.read(); 2735 #if L1_MULTI_CACHE 2736 dir_entry.owner.cache_id= r_cleanup_copy_cache.read(); 2737 #endif 2738 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 2739 m_cache_directory.write(set,way,dir_entry); 2740 2741 r_cleanup_next_ptr = heap_entry.next; 2742 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 2743 2744 } else{ 2745 ASSERT(false,"MemCache ERROR : CLEANUP hit but line not shared"); 2746 } 2747 } 2748 } 2749 break; 4160 entry.owner.cache_id= 0; 4161 #endif 4162 entry.owner.inst = false; 4163 entry.ptr = 0; 4164 size_t set = m_y[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4165 size_t way = r_sc_way.read(); 4166 m_cache_directory.write(set, way, entry); 4167 4168 r_sc_fsm = SC_INVAL; 4169 4170 #if DEBUG_MEMC_SC 4171 if( m_debug_sc_fsm ) 4172 { 4173 std::cout << " <MEMC.SC_DIR_INVAL> Register the PUT in TRT and invalidate DIR entry" 4174 << " / nline = " << std::hex << m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())] 4175 << " / set = " << std::dec << set << " / way = " << way << std::endl; 4176 } 4177 #endif 4178 } 4179 else 4180 { 4181 assert(false and "LOCK ERROR in SC_FSM, STATE = SC_DIR_INVAL"); 4182 } 4183 break; 4184 } 4185 ////////////// 4186 case SC_INVAL: // Request the broadcast inval to INIT_CMD FSM 4187 { 4188 if ( !r_sc_to_init_cmd_multi_req.read() && 4189 !r_sc_to_init_cmd_brdcast_req.read()) 4190 { 4191 r_sc_to_init_cmd_multi_req = false; 4192 r_sc_to_init_cmd_brdcast_req = true; 4193 r_sc_to_init_cmd_trdid = r_sc_upt_index.read(); 4194 r_sc_to_init_cmd_nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4195 r_sc_to_init_cmd_index = 0; 4196 r_sc_to_init_cmd_wdata = 0; 4197 4198 r_sc_fsm = SC_TRT_PUT_REQ; 4199 } 4200 break; 4201 } 4202 //////////////////// 4203 case SC_TRT_PUT_REQ: // request the IXR FSM to start a put transaction 4204 { 4205 if ( !r_sc_to_ixr_cmd_req ) 4206 { 4207 r_sc_to_ixr_cmd_req = true; 4208 r_sc_to_ixr_cmd_write = true; 4209 r_sc_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_sc_addr_fifo.read())]; 4210 r_sc_to_ixr_cmd_trdid = r_sc_trt_index.read(); 4211 r_sc_fsm = SC_IDLE; 4212 cmd_sc_fifo_get = true; 4213 r_sc_cpt = 0; 4214 4215 #if DEBUG_MEMC_SC 4216 if( m_debug_sc_fsm ) 4217 { 4218 std::cout << " <MEMC.SC_TRT_PUT_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 4219 << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] 4220 << " / trt_index = " << r_sc_trt_index.read() << std::endl; 4221 } 4222 #endif 4223 } 4224 else 4225 { 4226 std::cout << "MEM_CACHE, SC_TRT_PUT_REQ state : request should not have been previously set" 4227 << std::endl; 4228 } 4229 break; 2750 4230 } 2751 4231 ///////////////// 2752 case CLEANUP_HEAP_SEARCH: 2753 { 2754 ASSERT((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) 2755 ,"MemCache ERROR : bad HEAP allocation"); 2756 HeapEntry heap_entry = m_heap_directory.read(r_cleanup_next_ptr.read()); 2757 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 2758 bool cleanup_inst = r_cleanup_trdid.read() & 0x1; 2759 bool match_heap_srcid = ((heap_entry.owner.srcid 
== r_cleanup_srcid.read()) 2760 #if L1_MULTI_CACHE 2761 and (heap_entry.owner.cache_id == r_cleanup_pktid.read()) 2762 #endif 2763 ); 2764 bool match_heap_inst = (heap_entry.owner.inst == cleanup_inst); 2765 bool match_heap = match_heap_srcid && match_heap_inst; 2766 2767 if(match_heap){ 2768 r_cleanup_ptr = heap_entry.next; // reuse ressources 2769 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 2770 } 2771 else{ 2772 if(last) { 2773 ASSERT(false,"MemCache ERROR : CLEANUP hit but line not shared"); 2774 } else { 2775 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 2776 r_cleanup_prev_srcid = heap_entry.owner.srcid; 2777 #if L1_MULTI_CACHE 2778 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 2779 #endif 2780 r_cleanup_prev_inst = heap_entry.owner.inst; 2781 r_cleanup_next_ptr = heap_entry.next; 2782 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 2783 } 2784 } 2785 2786 break; 2787 } 2788 ///////////////// 2789 case CLEANUP_HEAP_CLEAN: 2790 { 2791 ASSERT((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) 2792 ,"MemCache ERROR : bad HEAP allocation"); 2793 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 2794 HeapEntry heap_entry; 2795 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 2796 #if L1_MULTI_CACHE 2797 heap_entry.owner.cache_id = r_cleanup_prev_cache_id.read(); 2798 #endif 2799 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 2800 if(last){ // this is the last entry of the list of copies 2801 heap_entry.next = r_cleanup_prev_ptr.read(); 2802 } else { // this is not the last entry 2803 heap_entry.next = r_cleanup_ptr.read(); 2804 } 2805 m_heap_directory.write(r_cleanup_prev_ptr.read(),heap_entry); 2806 r_cleanup_fsm = CLEANUP_HEAP_FREE; 2807 break; 2808 } 2809 ///////////////// 2810 case CLEANUP_HEAP_FREE: 2811 { 2812 ASSERT((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) 2813 ,"MemCache ERROR : bad HEAP allocation"); 2814 HeapEntry heap_entry; 2815 heap_entry.owner.srcid = 0; 2816 #if L1_MULTI_CACHE 2817 heap_entry.owner.cache_id = 0; 2818 #endif 2819 heap_entry.owner.inst = false; 2820 if(m_heap_directory.is_full()){ 2821 heap_entry.next = r_cleanup_next_ptr.read(); 2822 } else { 2823 heap_entry.next = m_heap_directory.next_free_ptr(); 2824 } 2825 m_heap_directory.write(r_cleanup_next_ptr.read(),heap_entry); 2826 m_heap_directory.write_free_ptr(r_cleanup_next_ptr.read()); 2827 m_heap_directory.unset_full(); 2828 r_cleanup_fsm = CLEANUP_RSP; 2829 break; 2830 } 2831 ///////////////// 2832 case CLEANUP_UPT_LOCK: 2833 { 2834 if( r_alloc_upt_fsm.read() == ALLOC_UPT_CLEANUP ) 2835 { 2836 size_t index = 0; 2837 bool hit_inval; 2838 hit_inval = m_update_tab.search_inval(r_cleanup_nline.read(),index); 2839 if(!hit_inval) { 2840 #if DEBUG_VCI_MEM_CACHE 2841 if(m_cpt_cycles > DEBUG_START_CYCLE) 2842 std::cout << "MEM_CACHE WARNING: cleanup with no corresponding entry at address : " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::dec << std::endl; 2843 #endif 2844 r_cleanup_fsm = CLEANUP_RSP; 2845 } else { 2846 r_cleanup_write_srcid = m_update_tab.srcid(index); 2847 r_cleanup_write_trdid = m_update_tab.trdid(index); 2848 r_cleanup_write_pktid = m_update_tab.pktid(index); 2849 r_cleanup_need_rsp = m_update_tab.need_rsp(index); 2850 r_cleanup_fsm = CLEANUP_UPT_WRITE; 2851 } 2852 r_cleanup_index.write(index) ; 2853 } 2854 break; 2855 } 2856 ///////////////// 2857 case CLEANUP_UPT_WRITE: 2858 { 2859 size_t count = 0; 2860 m_update_tab.decrement(r_cleanup_index.read(), count); // &count 2861 if(count == 0){ 2862 m_update_tab.clear(r_cleanup_index.read()); 2863 #ifdef IDEBUG 2864 
if(m_cpt_cycles > DEBUG_START_CYCLE){ 2865 std::cout << sc_time_stamp() << " " << name() << " CLEANUP_UPT_WRITE update table : " << std::endl; 2866 m_update_tab.print(); 2867 } 2868 #endif 2869 2870 if(r_cleanup_need_rsp.read()){ 2871 r_cleanup_fsm = CLEANUP_WRITE_RSP ; 2872 } else { 2873 r_cleanup_fsm = CLEANUP_RSP; 2874 } 2875 } else { 2876 r_cleanup_fsm = CLEANUP_RSP ; 2877 } 2878 break; 2879 } 2880 ///////////////// 2881 case CLEANUP_WRITE_RSP: 2882 { 2883 if( !r_cleanup_to_tgt_rsp_req.read()) { 2884 r_cleanup_to_tgt_rsp_req = true; 2885 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 2886 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 2887 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 2888 r_cleanup_fsm = CLEANUP_RSP; 2889 } 2890 break; 2891 } 2892 ///////////////// 2893 case CLEANUP_RSP: 2894 { 2895 if(p_vci_tgt_cleanup.rspack) 2896 r_cleanup_fsm = CLEANUP_IDLE; 2897 break; 2898 } 2899 } // end switch cleanup fsm 2900 2901 2902 //////////////////////////////////////////////////////////////////////////////////// 2903 // LLSC FSM 2904 //////////////////////////////////////////////////////////////////////////////////// 2905 // The LLSC FSM handles the LL & SC atomic access. 2906 // 2907 // For a LL : 2908 // It access the directory to check hit / miss. 2909 // - In case of hit, the LL request is registered in the Atomic Table and the 2910 // response is sent to the requesting processor. 2911 // - In case of miss, the LLSC FSM accesses the transaction table. 2912 // If a read transaction to the XRAM for this line already exists, 2913 // or if the transaction table is full, it returns to IDLE state. 2914 // Otherwise, a new transaction to the XRAM is initiated. 2915 // In both cases, the LL request is not consumed in the FIFO. 2916 // 2917 // For a SC : 2918 // It access the directory to check hit / miss. 2919 // - In case of hit, the Atomic Table is checked and the proper response 2920 // (true or false is sent to the requesting processor. 2921 // - In case of miss, the LLSC FSM accesses the transaction table. 2922 // If a read transaction to the XRAM for this line already exists, 2923 // or if the transaction table is full, it returns to IDLE state. 2924 // Otherwise, a new transaction to the XRAM is initiated. 2925 // In both cases, the SC request is not consumed in the FIFO. 
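//
// Assumed software-side usage (not defined in this file): the value
// returned by the LL is sent back unchanged as the first flit of the
// SC packet, so the memory cache can detect an intervening write.
// The helpers ll() and sc() below are hypothetical wrappers around
// the processor instructions:
//
//      data_t old = ll(addr);              // read and monitor the address
//      bool ok = sc(addr, old, new_value); // succeeds only if *addr still equals old
//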
2926 ///////////////////////////////////////////////////////////////////// 2927 2928 switch ( r_llsc_fsm.read() ) { 2929 2930 /////////////// 2931 case LLSC_IDLE: // fill the buffers 2932 { 2933 if( m_cmd_llsc_addr_fifo.rok() ) { 2934 #ifdef LOCK_DEBUG 2935 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2936 std::cout << "SC data : " << m_cmd_llsc_wdata_fifo.read() << std::endl; 2937 std::cout << "SC addr : " << std::hex << m_cmd_llsc_addr_fifo.read() << std::dec << std::endl; 2938 std::cout << "SC cpt : " << r_llsc_cpt.read() << std::endl; 2939 } 2940 #endif 2941 if(m_cmd_llsc_eop_fifo.read()){ 2942 m_cpt_sc++; 2943 r_llsc_fsm = SC_DIR_LOCK; 2944 #ifdef LOCK_DEBUG 2945 if(m_cpt_cycles > DEBUG_START_CYCLE){ 2946 std::cout << "SC eop" << std::endl; 2947 } 2948 #endif 2949 } else { // we keep the last word 2950 cmd_llsc_fifo_get = true; 2951 } 2952 // We fill the two buffers 2953 if(r_llsc_cpt.read() < 2){ 2954 r_llsc_rdata[r_llsc_cpt.read()] = m_cmd_llsc_wdata_fifo.read(); 2955 } 2956 if((r_llsc_cpt.read() == 1) && m_cmd_llsc_eop_fifo.read()) 2957 r_llsc_wdata = m_cmd_llsc_wdata_fifo.read(); 2958 if(r_llsc_cpt.read()>3) 2959 ASSERT(false,"MEMCACHE error : SC too long"); 2960 if(r_llsc_cpt.read()==2){ 2961 r_llsc_wdata = m_cmd_llsc_wdata_fifo.read(); 2962 } 2963 r_llsc_cpt = r_llsc_cpt.read()+1; 2964 } 2965 break; 2966 } 2967 ///////////////// 2968 case SC_DIR_LOCK: 2969 { 2970 if( r_alloc_dir_fsm.read() == ALLOC_DIR_LLSC ) { 2971 size_t way = 0; 2972 DirectoryEntry entry(m_cache_directory.read(m_cmd_llsc_addr_fifo.read(), way)); 2973 r_llsc_is_cnt = entry.is_cnt; 2974 r_llsc_dirty = entry.dirty; 2975 r_llsc_tag = entry.tag; 2976 r_llsc_way = way; 2977 r_llsc_copy = entry.owner.srcid; 2978 #if L1_MULTI_CACHE 2979 r_llsc_copy_cache = entry.owner.cache_id; 2980 #endif 2981 2982 r_llsc_copy_inst = entry.owner.inst; 2983 r_llsc_ptr = entry.ptr; 2984 r_llsc_count = entry.count; 2985 if ( entry.valid ){ 2986 r_llsc_fsm = SC_DIR_HIT_READ; 2987 } 2988 else r_llsc_fsm = LLSC_TRT_LOCK; 2989 } 2990 break; 2991 } 2992 //////////////// 2993 case SC_DIR_HIT_READ: 2994 { 2995 size_t way = r_llsc_way.read(); 2996 size_t set = m_y[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 2997 size_t word = m_x[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 2998 2999 // update directory (lock & dirty bits 3000 DirectoryEntry entry; 3001 entry.valid = true; 3002 entry.is_cnt = r_llsc_is_cnt.read(); 3003 entry.dirty = true; 3004 entry.lock = true; 3005 entry.tag = r_llsc_tag.read(); 3006 entry.owner.srcid = r_llsc_copy.read(); 3007 #if L1_MULTI_CACHE 3008 entry.owner.cache_id = r_llsc_copy_cache.read(); 3009 #endif 3010 entry.owner.inst = r_llsc_copy_inst.read(); 3011 entry.count = r_llsc_count.read(); 3012 entry.ptr = r_llsc_ptr.read(); 3013 m_cache_directory.write(set, way, entry); 3014 3015 // read data in cache 3016 bool ok; 3017 ok = (r_llsc_rdata[0].read() == m_cache_data[way][set][word]); 3018 if(r_llsc_cpt.read()==4) // 64 bits SC 3019 ok &= (r_llsc_rdata[1] == m_cache_data[way][set][word+1]); 3020 3021 #ifdef LOCK_DEBUG 3022 if(m_cpt_cycles > DEBUG_START_CYCLE){ 3023 std::cout << "SC_DIR_HIT_READ ok ? 
" << ok << std::endl; 3024 if(!ok){ 3025 std::cout << "SC_DIR_HIT_READ cache data 0 : " << m_cache_data[way][set][word] << std::endl; 3026 if(r_llsc_cpt.read()==4) 3027 std::cout << "SC_DIR_HIT_READ rdata 1 : " << m_cache_data[way][set][word+1] << std::endl; 3028 std::cout << "SC_DIR_HIT_READ rdata 0 : " << r_llsc_rdata[0].read() << std::endl; 3029 if(r_llsc_cpt.read()==4) 3030 std::cout << "SC_DIR_HIT_READ rdata 1 : " << r_llsc_rdata[1].read() << std::endl; 3031 std::cout << "SC_DIR_HIT_READ wdata 0 : " << r_llsc_wdata.read() << std::endl; 3032 if(r_llsc_cpt.read()==4) 3033 std::cout << "SC_DIR_HIT_READ wdata 1 : " << m_cmd_llsc_wdata_fifo.read() << std::endl; 3034 } 3035 } 3036 #endif 3037 if(ok){ 3038 /* to avoid livelock, force the atomic access to fail (pseudo-)randomly */ 3039 bool fail = (r_llsc_lfsr % (64) == 0); 3040 r_llsc_lfsr = (r_llsc_lfsr >> 1) ^ ((-(r_llsc_lfsr & 1)) & 0xd0000001); 3041 #ifdef RANDOMIZE_SC 3042 if(fail){ 3043 #else 3044 if(0){ 3045 #endif 3046 r_llsc_fsm = SC_RSP_FALSE; 3047 } else { 3048 if(r_llsc_count.read()) { // Shared line 3049 if(entry.is_cnt) { 3050 r_llsc_fsm = SC_TRT_LOCK; 3051 } else { 3052 if( !r_llsc_to_init_cmd_multi_req.read() && 3053 !r_llsc_to_init_cmd_brdcast_req.read() ) 3054 r_llsc_fsm = SC_UPT_LOCK; 3055 else 3056 r_llsc_fsm = SC_WAIT; 3057 } 3058 } else { 3059 r_llsc_fsm = SC_DIR_HIT_WRITE; 3060 } 3061 } 3062 } else { 3063 r_llsc_fsm = SC_RSP_FALSE; 3064 } 3065 break; 3066 } 3067 //////////////// 3068 case SC_DIR_HIT_WRITE: 3069 { 3070 size_t way = r_llsc_way.read(); 3071 size_t set = m_y[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3072 size_t word = m_x[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3073 3074 m_cache_data[way][set][word] = r_llsc_wdata.read(); 3075 if(r_llsc_cpt.read()==4) 3076 m_cache_data[way][set][word+1] = m_cmd_llsc_wdata_fifo.read(); 3077 3078 r_llsc_fsm = SC_RSP_TRUE; 3079 break; 4232 case SC_RSP_FAIL: // request TGT_RSP FSM to send a failure response 4233 { 4234 if( !r_sc_to_tgt_rsp_req ) 4235 { 4236 cmd_sc_fifo_get = true; 4237 r_sc_cpt = 0; 4238 r_sc_to_tgt_rsp_req = true; 4239 r_sc_to_tgt_rsp_data = 1; 4240 r_sc_to_tgt_rsp_srcid = m_cmd_sc_srcid_fifo.read(); 4241 r_sc_to_tgt_rsp_trdid = m_cmd_sc_trdid_fifo.read(); 4242 r_sc_to_tgt_rsp_pktid = m_cmd_sc_pktid_fifo.read(); 4243 r_sc_fsm = SC_IDLE; 4244 4245 #if DEBUG_MEMC_SC 4246 if( m_debug_sc_fsm ) 4247 { 4248 std::cout << " <MEMC.SC_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 4249 } 4250 #endif 4251 } 4252 break; 4253 } 4254 //////////////////// 4255 case SC_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 4256 { 4257 if( !r_sc_to_tgt_rsp_req ) 4258 { 4259 cmd_sc_fifo_get = true; 4260 r_sc_cpt = 0; 4261 r_sc_to_tgt_rsp_req = true; 4262 r_sc_to_tgt_rsp_data = 0; 4263 r_sc_to_tgt_rsp_srcid = m_cmd_sc_srcid_fifo.read(); 4264 r_sc_to_tgt_rsp_trdid = m_cmd_sc_trdid_fifo.read(); 4265 r_sc_to_tgt_rsp_pktid = m_cmd_sc_pktid_fifo.read(); 4266 r_sc_fsm = SC_IDLE; 4267 4268 #if DEBUG_MEMC_SC 4269 if( m_debug_sc_fsm ) 4270 { 4271 std::cout << " <MEMC.SC_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 4272 } 4273 #endif 4274 } 4275 break; 3080 4276 } 3081 4277 ///////////////////// 3082 case SC_UPT_LOCK: // Try to register the request in Update Table 3083 { 3084 3085 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_LLSC ) { 3086 size_t way = r_llsc_way.read(); 3087 size_t set = m_y[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3088 size_t word = m_x[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3089 bool wok = false; 3090 
size_t index = 0; 3091 size_t srcid = m_cmd_llsc_srcid_fifo.read(); 3092 size_t trdid = m_cmd_llsc_trdid_fifo.read(); 3093 size_t pktid = m_cmd_llsc_pktid_fifo.read(); 3094 addr_t nline = m_nline[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3095 size_t nb_copies = r_llsc_count.read(); 3096 3097 wok =m_update_tab.set(true, // it's an update transaction 3098 false, // it's not a broadcast 3099 true, // it needs a response 3100 srcid, 3101 trdid, 3102 pktid, 3103 nline, 3104 nb_copies, 3105 index); 3106 if(wok){ 3107 // write data in cache 3108 m_cache_data[way][set][word] = r_llsc_wdata.read(); 3109 if(r_llsc_cpt.read()==4) 3110 m_cache_data[way][set][word+1] = m_cmd_llsc_wdata_fifo.read(); 3111 } 3112 #ifdef IDEBUG 3113 if(m_cpt_cycles > DEBUG_START_CYCLE){ 3114 if(wok){ 3115 std::cout << sc_time_stamp() << " " << name() << " SC_UPT_LOCK update table : " << std::endl; 3116 m_update_tab.print(); 3117 } 3118 } 3119 #endif 3120 r_llsc_upt_index = index; 3121 // releases the lock protecting the Update Table and the Directory if no entry... 3122 if ( wok ) r_llsc_fsm = SC_HEAP_LOCK; 3123 else r_llsc_fsm = SC_WAIT; 3124 } 3125 break; 4278 case SC_TRT_GET_LOCK: // cache miss : request access to transaction Table 4279 { 4280 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4281 { 4282 size_t index = 0; 4283 bool hit_read = m_transaction_tab.hit_read( 4284 m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()],index); 4285 bool hit_write = m_transaction_tab.hit_write( 4286 m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()]); 4287 bool wok = !m_transaction_tab.full(index); 4288 4289 #if DEBUG_MEMC_SC 4290 if( m_debug_sc_fsm ) 4291 { 4292 std::cout << " <MEMC.SC_TRT_GET_LOCK> Check TRT state" 4293 << " / hit_read = " << hit_read 4294 << " / hit_write = " << hit_write 4295 << " / wok = " << wok 4296 << " / index = " << index << std::endl; 4297 } 4298 #endif 4299 4300 if ( hit_read || !wok || hit_write ) // missing line already requested or no space in TRT 4301 { 4302 r_sc_fsm = SC_WAIT; 4303 } 4304 else 4305 { 4306 r_sc_trt_index = index; 4307 r_sc_fsm = SC_TRT_GET_SET; 4308 } 4309 } 4310 break; 3126 4311 } 3127 4312 //////////////////// 3128 case SC_WAIT: // release all locks 3129 { 3130 r_llsc_fsm = SC_DIR_LOCK; 3131 break; 3132 } 3133 //////////////////// 3134 case SC_HEAP_LOCK: // lock the heap 3135 { 3136 if( r_alloc_heap_fsm.read() == ALLOC_HEAP_LLSC ){ 3137 r_llsc_fsm = SC_UPT_REQ; 3138 } 3139 break; 3140 } 3141 //////////////////// 3142 case SC_UPT_REQ: // Request the update 3143 { 3144 ASSERT((r_alloc_heap_fsm.read() == ALLOC_HEAP_LLSC) 3145 ,"MemCache ERROR : bad HEAP allocation"); 3146 if( !r_llsc_to_init_cmd_multi_req.read() && 3147 !r_llsc_to_init_cmd_brdcast_req.read() ){ 3148 r_llsc_to_init_cmd_brdcast_req = false; 3149 r_llsc_to_init_cmd_trdid = r_llsc_upt_index.read(); 3150 r_llsc_to_init_cmd_nline = m_nline[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3151 r_llsc_to_init_cmd_index = m_x[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3152 r_llsc_to_init_cmd_wdata = r_llsc_wdata.read(); 3153 if(r_llsc_cpt.read() == 4){ 3154 r_llsc_to_init_cmd_is_long = true; 3155 r_llsc_to_init_cmd_wdata_high = m_cmd_llsc_wdata_fifo.read(); 3156 } else { 3157 r_llsc_to_init_cmd_is_long = false; 3158 r_llsc_to_init_cmd_wdata_high = 0; 3159 } 3160 3161 // We put the first copy in the fifo 3162 llsc_to_init_cmd_fifo_put = true; 3163 llsc_to_init_cmd_fifo_inst = r_llsc_copy_inst.read(); 3164 llsc_to_init_cmd_fifo_srcid = r_llsc_copy.read(); 3165 #if L1_MULTI_CACHE 3166 llsc_to_init_cmd_fifo_cache_id= r_llsc_copy_cache.read(); 
3167 #endif 3168 if(r_llsc_count.read() == 1){ 3169 #ifdef LOCK_DEBUG 3170 if(m_cpt_cycles > DEBUG_START_CYCLE){ 3171 std::cout << "SC_UPT_REQ, only one owner : " << r_llsc_copy.read() << std::endl; 3172 } 3173 #endif 3174 r_llsc_fsm = LLSC_IDLE; 3175 cmd_llsc_fifo_get = true; 3176 r_llsc_to_init_cmd_multi_req = true; 3177 r_llsc_cpt = 0; 3178 } else { 3179 r_llsc_fsm = SC_UPDATE; 3180 } 3181 } 3182 break; 3183 } 3184 ////////////////// 3185 case SC_UPDATE: // send a multi-update request to INIT_CMD fsm 3186 { 3187 ASSERT((r_alloc_heap_fsm.read() == ALLOC_HEAP_LLSC) 3188 ,"MemCache ERROR : bad HEAP allocation"); 3189 HeapEntry entry = m_heap_directory.read(r_llsc_ptr.read()); 3190 llsc_to_init_cmd_fifo_srcid = entry.owner.srcid; 3191 #if L1_MULTI_CACHE 3192 llsc_to_init_cmd_fifo_cache_id = entry.owner.cache_id; 3193 #endif 3194 llsc_to_init_cmd_fifo_inst = entry.owner.inst; 3195 llsc_to_init_cmd_fifo_put = true; 3196 3197 if( m_llsc_to_init_cmd_inst_fifo.wok() ){ 3198 r_llsc_ptr = entry.next; 3199 if( entry.next == r_llsc_ptr.read() ) { // last copy 3200 r_llsc_to_init_cmd_multi_req = true; 3201 r_llsc_fsm = LLSC_IDLE; // Response will be sent after receiving 3202 // all update responses 3203 cmd_llsc_fifo_get = true; 3204 r_llsc_cpt = 0; 3205 } else { 3206 r_llsc_fsm = SC_UPDATE; 3207 } 3208 } else { 3209 r_llsc_fsm = SC_UPDATE; 3210 } 3211 3212 break; 3213 } 3214 ////////////////// 3215 case SC_TRT_LOCK: 3216 { 3217 if( r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC ) { 3218 if( !r_llsc_to_ixr_cmd_req ) { // we can transfer the data to the buffer 3219 size_t way = r_llsc_way.read(); 3220 size_t set = m_y[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3221 for(size_t i = 0; i<m_words; i++){ 3222 if(i==m_x[(vci_addr_t)m_cmd_llsc_addr_fifo.read()]) { 3223 r_llsc_to_ixr_cmd_data[i] = r_llsc_wdata.read(); 3224 } else { 3225 if((i==(m_x[(vci_addr_t)m_cmd_llsc_addr_fifo.read()]+1)) && // 64 bit SC 3226 (r_llsc_cpt.read()==4)) { 3227 r_llsc_to_ixr_cmd_data[i] = m_cmd_llsc_wdata_fifo.read(); 3228 } else { 3229 r_llsc_to_ixr_cmd_data[i] = m_cache_data[way][set][i]; 3230 } 3231 } 3232 } 3233 size_t wok_index = 0; 3234 bool wok = !m_transaction_tab.full(wok_index); 3235 if ( wok ) { // set a new entry in TRT 3236 r_llsc_trt_index = wok_index; 3237 r_llsc_fsm = SC_INVAL_LOCK; 3238 } else { 3239 r_llsc_fsm = SC_WAIT; 3240 } 3241 } else { 3242 r_llsc_fsm = SC_WAIT; 3243 } 3244 } 3245 break; 3246 } 3247 ////////////////// 3248 case SC_INVAL_LOCK: 3249 { 3250 if ( r_alloc_upt_fsm.read() == ALLOC_UPT_LLSC ) { 3251 bool wok = false; 3252 size_t index = 0; 3253 size_t srcid = m_cmd_llsc_srcid_fifo.read(); 3254 size_t trdid = m_cmd_llsc_trdid_fifo.read(); 3255 size_t pktid = m_cmd_llsc_pktid_fifo.read(); 3256 addr_t nline = m_nline[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3257 size_t nb_copies = r_llsc_count.read(); 3258 3259 wok =m_update_tab.set(false, // it's an inval transaction 3260 true, // it's a broadcast 3261 true, // it needs a response 3262 srcid, 3263 trdid, 3264 pktid, 3265 nline, 3266 nb_copies, 3267 index); 3268 #ifdef IDEBUG 3269 if(m_cpt_cycles > DEBUG_START_CYCLE){ 3270 if(wok){ 3271 std::cout << sc_time_stamp() << " " << name() << " LLSC_INVAL_LOCK update table : " << std::endl; 3272 m_update_tab.print(); 3273 } 3274 } 3275 #endif 3276 r_llsc_upt_index = index; 3277 // releases the lock protecting Update Table if no entry... 
3278 if ( wok ) r_llsc_fsm = SC_DIR_INVAL; 3279 else r_llsc_fsm = SC_WAIT; 3280 } 3281 break; 3282 } 3283 ////////////////// 3284 case SC_DIR_INVAL: 3285 { 3286 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC ) && 3287 (r_alloc_upt_fsm.read() == ALLOC_UPT_LLSC ) && 3288 (r_alloc_dir_fsm.read() == ALLOC_DIR_LLSC )) 3289 { 3290 m_transaction_tab.set(r_llsc_trt_index.read(), 3291 false, // write request to XRAM 3292 m_nline[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())], 3293 0, 3294 0, 3295 0, 3296 false, // not a processor read 3297 0, // not a single word 3298 0, // word index 3299 std::vector<be_t>(m_words,0), 3300 std::vector<data_t>(m_words,0)); 3301 #ifdef TDEBUG 3302 if(m_cpt_cycles > DEBUG_START_CYCLE){ 3303 std::cout << sc_time_stamp() << " " << name() << " SC_DIR_INVAL transaction table : " << std::endl; 3304 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 3305 m_transaction_tab.print(i); 3306 } 3307 #endif 3308 3309 // invalidate directory entry 3310 DirectoryEntry entry; 3311 entry.valid = false; 3312 entry.dirty = false; 3313 entry.tag = 0; 3314 entry.is_cnt = false; 3315 entry.lock = false; 3316 entry.count = 0; 3317 entry.owner.srcid = 0; 3318 #if L1_MULTI_CACHE 3319 entry.owner.cache_id= 0; 3320 #endif 3321 entry.owner.inst = false; 3322 entry.ptr = 0; 3323 size_t set = m_y[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3324 size_t way = r_llsc_way.read(); 3325 m_cache_directory.write(set, way, entry); 3326 3327 r_llsc_fsm = SC_INVAL; 3328 } else { 3329 ASSERT(false,"LOCK ERROR in LLSC_FSM, STATE = LLSC_DIR_INVAL"); 3330 } 3331 3332 break; 3333 3334 } 3335 ////////////////// 3336 case SC_INVAL: 3337 { 3338 if ( !r_llsc_to_init_cmd_multi_req.read() && 3339 !r_llsc_to_init_cmd_brdcast_req.read()) { 3340 r_llsc_to_init_cmd_multi_req = false; 3341 r_llsc_to_init_cmd_brdcast_req = true; 3342 r_llsc_to_init_cmd_trdid = r_llsc_upt_index.read(); 3343 r_llsc_to_init_cmd_nline = m_nline[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3344 r_llsc_to_init_cmd_index = 0; 3345 r_llsc_to_init_cmd_wdata = 0; 3346 3347 r_llsc_fsm = SC_XRAM_SEND; 3348 // all update responses 3349 } 3350 3351 break; 3352 } 3353 ////////////////// 3354 case SC_XRAM_SEND: 3355 { 3356 if ( !r_llsc_to_ixr_cmd_req ) { 3357 r_llsc_to_ixr_cmd_req = true; 3358 r_llsc_to_ixr_cmd_write = true; 3359 r_llsc_to_ixr_cmd_nline = m_nline[(vci_addr_t)(m_cmd_llsc_addr_fifo.read())]; 3360 r_llsc_to_ixr_cmd_trdid = r_llsc_trt_index.read(); 3361 r_llsc_fsm = LLSC_IDLE; 3362 cmd_llsc_fifo_get = true; 3363 r_llsc_cpt = 0; 3364 } else { 3365 ASSERT(false,"MEM_CACHE, LLSC FSM : SC_XRAM_SEND state : the request should not have been previously set"); 3366 } 3367 break; 3368 } 3369 ////////////////// 3370 case SC_RSP_FALSE: 3371 { 3372 if( !r_llsc_to_tgt_rsp_req ) { 3373 cmd_llsc_fifo_get = true; 3374 r_llsc_cpt = 0; 3375 r_llsc_to_tgt_rsp_req = true; 3376 r_llsc_to_tgt_rsp_data = 1; 3377 r_llsc_to_tgt_rsp_srcid = m_cmd_llsc_srcid_fifo.read(); 3378 r_llsc_to_tgt_rsp_trdid = m_cmd_llsc_trdid_fifo.read(); 3379 r_llsc_to_tgt_rsp_pktid = m_cmd_llsc_pktid_fifo.read(); 3380 r_llsc_fsm = LLSC_IDLE; 3381 } 3382 break; 3383 } 3384 ///////////////// 3385 case SC_RSP_TRUE: 3386 { 3387 if( !r_llsc_to_tgt_rsp_req ) { 3388 cmd_llsc_fifo_get = true; 3389 r_llsc_cpt = 0; 3390 r_llsc_to_tgt_rsp_req = true; 3391 r_llsc_to_tgt_rsp_data = 0; 3392 r_llsc_to_tgt_rsp_srcid = m_cmd_llsc_srcid_fifo.read(); 3393 r_llsc_to_tgt_rsp_trdid = m_cmd_llsc_trdid_fifo.read(); 3394 r_llsc_to_tgt_rsp_pktid = m_cmd_llsc_pktid_fifo.read(); 3395 r_llsc_fsm = LLSC_IDLE; 3396 } 3397 
break; 3398 } 3399 /////////////////// 3400 case LLSC_TRT_LOCK: // read or write miss : check the Transaction Table 3401 { 3402 if( r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC ) { 3403 size_t index = 0; 3404 bool hit_read = m_transaction_tab.hit_read(m_nline[(vci_addr_t)m_cmd_llsc_addr_fifo.read()],index); 3405 bool hit_write = m_transaction_tab.hit_write(m_nline[(vci_addr_t)m_cmd_llsc_addr_fifo.read()]); 3406 bool wok = !m_transaction_tab.full(index); 3407 3408 if ( hit_read || !wok || hit_write ) { // missing line already requested or no space in TRT 3409 r_llsc_fsm = SC_WAIT; 3410 } else { 3411 r_llsc_trt_index = index; 3412 r_llsc_fsm = LLSC_TRT_SET; 3413 } 3414 } 3415 break; 3416 } 3417 ////////////////// 3418 case LLSC_TRT_SET: // register the XRAM transaction in Transaction Table 3419 { 3420 if( r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC ) { 4313 case SC_TRT_GET_SET: // register the GET transaction in TRT 4314 { 4315 if( r_alloc_trt_fsm.read() == ALLOC_TRT_SC ) 4316 { 3421 4317 std::vector<be_t> be_vector; 3422 4318 std::vector<data_t> data_vector; … … 3429 4325 } 3430 4326 3431 m_transaction_tab.set(r_llsc_trt_index.read(), 3432 true, 3433 m_nline[(vci_addr_t)m_cmd_llsc_addr_fifo.read()], 3434 m_cmd_llsc_srcid_fifo.read(), 3435 m_cmd_llsc_trdid_fifo.read(), 3436 m_cmd_llsc_pktid_fifo.read(), 3437 false, 3438 0, 3439 0, 3440 be_vector, 3441 data_vector); 3442 #ifdef TDEBUG 3443 if(m_cpt_cycles > DEBUG_START_CYCLE){ 3444 std::cout << sc_time_stamp() << " " << name() << " LLSC_TRT_SET transaction table : " << std::endl; 3445 for(size_t i = 0 ; i < m_transaction_tab.size() ; i++) 3446 m_transaction_tab.print(i); 3447 } 3448 #endif 3449 3450 r_llsc_fsm = LLSC_XRAM_REQ; 3451 } 3452 break; 3453 } 3454 /////////////////// 3455 case LLSC_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line 3456 { 3457 if ( !r_llsc_to_ixr_cmd_req ) { 3458 r_llsc_to_ixr_cmd_req = true; 3459 r_llsc_to_ixr_cmd_write = false; 3460 r_llsc_to_ixr_cmd_trdid = r_llsc_trt_index.read(); 3461 r_llsc_to_ixr_cmd_nline = m_nline[(vci_addr_t)m_cmd_llsc_addr_fifo.read()]; 3462 r_llsc_fsm = SC_WAIT; 3463 } 3464 break; 3465 } 3466 } // end switch r_llsc_fsm 4327 m_transaction_tab.set(r_sc_trt_index.read(), 4328 true, // read request 4329 m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()], 4330 m_cmd_sc_srcid_fifo.read(), 4331 m_cmd_sc_trdid_fifo.read(), 4332 m_cmd_sc_pktid_fifo.read(), 4333 false, // write request from processor 4334 0, 4335 0, 4336 be_vector, 4337 data_vector); 4338 r_sc_fsm = SC_TRT_GET_REQ; 4339 4340 #if DEBUG_MEMC_SC 4341 if( m_debug_sc_fsm ) 4342 { 4343 std::cout << " <MEMC.SC_TRT_GET_SET> Register a GET transaction in TRT" << std::hex 4344 << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] 4345 << " / trt_index = " << r_sc_trt_index.read() << std::endl; 4346 } 4347 #endif 4348 } 4349 break; 4350 } 4351 //////////////////// 4352 case SC_TRT_GET_REQ: // request the IXR_CMD FSM to fetch the missing line 4353 { 4354 if ( !r_sc_to_ixr_cmd_req ) 4355 { 4356 r_sc_to_ixr_cmd_req = true; 4357 r_sc_to_ixr_cmd_write = false; 4358 r_sc_to_ixr_cmd_trdid = r_sc_trt_index.read(); 4359 r_sc_to_ixr_cmd_nline = m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()]; 4360 r_sc_fsm = SC_WAIT; 4361 4362 #if DEBUG_MEMC_SC 4363 if( m_debug_sc_fsm ) 4364 { 4365 std::cout << " <MEMC.SC_TRT_GET_REQ> Request a GET transaction to IXR_CMD FSM" << std::hex 4366 << " / nline = " << m_nline[(vci_addr_t)m_cmd_sc_addr_fifo.read()] 4367 << " / trt_index = " << r_sc_trt_index.read() << std::endl; 4368 } 4369 #endif 4370 } 4371 
break; 4372 } 4373 } // end switch r_sc_fsm 3467 4374 3468 4375 … … 3472 4379 // The INIT_CMD fsm controls the VCI CMD initiator port, used to update 3473 4380 // or invalidate cache lines in L1 caches. 3474 // It implements a round-robin priority between the two following requests: 3475 // - r_write_to_init_cmd_req : update request from WRITE FSM 3476 // - r_xram_rsp_to_init_cmd_req : invalidate request from XRAM_RSP FSM 3477 // The inval request is a single cell VCI write command containing the 4381 // 4382 // It implements a round-robin priority between the three possible client FSMs 4383 // XRAM_RSP, WRITE and SC. Each FSM can request two types of services: 4384 // - r_xram_rsp_to_init_cmd_multi_req : multi-inval 4385 // r_xram_rsp_to_init_cmd_brdcast_req : broadcast-inval 4386 // - r_write_to_init_cmd_multi_req : multi-update 4387 // r_write_to_init_cmd_brdcast_req : broadcast-inval 4388 // - r_sc_to_init_cmd_multi_req : multi-update 4389 // r_sc_to_init_cmd_brdcast_req : broadcast-inval 4390 // 4391 // An inval request is a single cell VCI write command containing the 3478 4392 // index of the line to be invalidated. 3479 // Theupdate request is a multi-cells VCI write command : The first cell4393 // An update request is a multi-cells VCI write command : The first cell 3480 4394 // contains the index of the cache line to be updated. The second cell contains 3481 4395 // the index of the first modified word in the line. The following cells … … 3483 4397 /////////////////////////////////////////////////////////////////////////////// 3484 4398 3485 switch ( r_init_cmd_fsm.read() ) { 3486 3487 //////////////////////// 3488 case INIT_CMD_UPDT_IDLE: // Invalidate requests have highest priority 3489 { 3490 3491 if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 3492 r_xram_rsp_to_init_cmd_multi_req.read() ) { 3493 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 3494 m_cpt_inval++; 3495 } else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) { 3496 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 3497 m_cpt_inval++; 3498 } else if ( m_write_to_init_cmd_inst_fifo.rok() || 3499 r_write_to_init_cmd_multi_req.read() ) { 3500 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 3501 m_cpt_update++; 3502 } else if ( r_write_to_init_cmd_brdcast_req.read() ){ 3503 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 3504 m_cpt_inval++; 3505 } else if ( m_llsc_to_init_cmd_inst_fifo.rok() || 3506 r_llsc_to_init_cmd_multi_req.read() ) { 3507 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 3508 m_cpt_update++; 3509 } else if( r_llsc_to_init_cmd_brdcast_req.read() ){ 3510 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 3511 m_cpt_inval++; 3512 } 3513 break; 4399 switch ( r_init_cmd_fsm.read() ) 4400 { 4401 //////////////////////// 4402 case INIT_CMD_UPDT_IDLE: // XRAM_RSP FSM has highest priority 4403 { 4404 if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 4405 r_xram_rsp_to_init_cmd_multi_req.read() ) 4406 { 4407 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4408 m_cpt_inval++; 4409 } 4410 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 4411 { 4412 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 4413 m_cpt_inval++; 4414 } 4415 else if ( m_write_to_init_cmd_inst_fifo.rok() || 4416 r_write_to_init_cmd_multi_req.read() ) 4417 { 4418 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4419 m_cpt_update++; 4420 } 4421 else if ( r_write_to_init_cmd_brdcast_req.read() ) 4422 { 4423 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 4424 m_cpt_inval++; 4425 } 4426 else if ( m_sc_to_init_cmd_inst_fifo.rok() || 4427 r_sc_to_init_cmd_multi_req.read() ) 4428 { 4429 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4430 
m_cpt_update++; 4431 } 4432 else if( r_sc_to_init_cmd_brdcast_req.read() ) 4433 { 4434 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 4435 m_cpt_inval++; 4436 } 4437 break; 3514 4438 } 3515 4439 ///////////////////////// 3516 case INIT_CMD_INVAL_IDLE: // Update requests have highest priority 3517 { 3518 if ( m_write_to_init_cmd_inst_fifo.rok() || 3519 r_write_to_init_cmd_multi_req.read() ) { 3520 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 3521 m_cpt_update++; 3522 } else if ( r_write_to_init_cmd_brdcast_req.read() ){ 3523 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 3524 m_cpt_inval++; 3525 } else if ( m_llsc_to_init_cmd_inst_fifo.rok() || 3526 r_llsc_to_init_cmd_multi_req.read() ) { 3527 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 3528 m_cpt_update++; 3529 } else if( r_llsc_to_init_cmd_brdcast_req.read() ){ 3530 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 3531 m_cpt_inval++; 3532 } else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 3533 r_xram_rsp_to_init_cmd_multi_req.read() ) { 3534 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 3535 m_cpt_inval++; 3536 } else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) { 3537 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 3538 m_cpt_inval++; 3539 } 3540 break; 4440 case INIT_CMD_INVAL_IDLE: // WRITE FSM has highest priority 4441 { 4442 if ( m_write_to_init_cmd_inst_fifo.rok() || 4443 r_write_to_init_cmd_multi_req.read() ) 4444 { 4445 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4446 m_cpt_update++; 4447 } 4448 else if ( r_write_to_init_cmd_brdcast_req.read() ) 4449 { 4450 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 4451 m_cpt_inval++; 4452 } 4453 else if ( m_sc_to_init_cmd_inst_fifo.rok() || 4454 r_sc_to_init_cmd_multi_req.read() ) 4455 { 4456 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4457 m_cpt_update++; 4458 } 4459 else if( r_sc_to_init_cmd_brdcast_req.read() ) 4460 { 4461 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 4462 m_cpt_inval++; 4463 } 4464 else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 4465 r_xram_rsp_to_init_cmd_multi_req.read() ) 4466 { 4467 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4468 m_cpt_inval++; 4469 } 4470 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 4471 { 4472 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 4473 m_cpt_inval++; 4474 } 4475 break; 4476 } 4477 ////////////////////////// 4478 case INIT_CMD_SC_UPDT_IDLE: // SC FSM has highest priority 4479 { 4480 if ( m_sc_to_init_cmd_inst_fifo.rok() || 4481 r_sc_to_init_cmd_multi_req.read() ) 4482 { 4483 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4484 m_cpt_update++; 4485 } 4486 else if( r_sc_to_init_cmd_brdcast_req.read() ) 4487 { 4488 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 4489 m_cpt_inval++; 4490 } 4491 else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 4492 r_xram_rsp_to_init_cmd_multi_req.read() ) 4493 { 4494 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4495 m_cpt_inval++; 4496 } 4497 else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) 4498 { 4499 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 4500 m_cpt_inval++; 4501 } 4502 else if ( m_write_to_init_cmd_inst_fifo.rok() || 4503 r_write_to_init_cmd_multi_req.read() ) 4504 { 4505 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4506 m_cpt_update++; 4507 } 4508 else if ( r_write_to_init_cmd_brdcast_req.read() ) 4509 { 4510 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 4511 m_cpt_inval++; 4512 } 4513 break; 4514 } 4515 ////////////////////////// 4516 case INIT_CMD_INVAL_NLINE: // send a multi-inval (from XRAM_RSP) 4517 { 4518 if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() ) 4519 { 4520 if ( p_vci_ini.cmdack ) 4521 { 4522 m_cpt_inval_mult++; 4523 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 4524 
xram_rsp_to_init_cmd_fifo_get = true; 4525 } 4526 } 4527 else 4528 { 4529 if( r_xram_rsp_to_init_cmd_multi_req.read() ) r_xram_rsp_to_init_cmd_multi_req = false; 4530 r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; 4531 } 4532 break; 4533 } 4534 /////////////////////////// 4535 case INIT_CMD_XRAM_BRDCAST: // send a broadcast-inval (from XRAM_RSP) 4536 { 4537 if ( p_vci_ini.cmdack ) 4538 { 4539 m_cpt_inval_brdcast++; 4540 r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; 4541 r_xram_rsp_to_init_cmd_brdcast_req = false; 4542 } 4543 break; 4544 } 4545 //////////////////////////// 4546 case INIT_CMD_WRITE_BRDCAST: // send a broadcast-inval (from WRITE FSM) 4547 { 4548 if( p_vci_ini.cmdack ) 4549 { 4550 4551 #if DEBUG_MEMC_INIT_CMD 4552 if( m_debug_init_cmd_fsm ) 4553 { 4554 std::cout << " <MEMC.INIT_CMD_WRITE_BRDCAST> Broadcast-Inval for line " 4555 << r_write_to_init_cmd_nline.read() << std::endl; 4556 } 4557 #endif 4558 m_cpt_inval_brdcast++; 4559 r_write_to_init_cmd_brdcast_req = false; 4560 r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; 4561 } 4562 break; 3541 4563 } 3542 4564 ///////////////////////// 3543 case INIT_CMD_SC_UPDT_IDLE: // Update requests for SCs have highest priority 3544 { 3545 if ( m_llsc_to_init_cmd_inst_fifo.rok() || 3546 r_llsc_to_init_cmd_multi_req.read() ) { 3547 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 3548 m_cpt_update++; 3549 } else if( r_llsc_to_init_cmd_brdcast_req.read() ){ 3550 r_init_cmd_fsm = INIT_CMD_SC_BRDCAST; 3551 m_cpt_inval++; 3552 } else if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() || 3553 r_xram_rsp_to_init_cmd_multi_req.read() ) { 3554 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 3555 m_cpt_inval++; 3556 } else if ( r_xram_rsp_to_init_cmd_brdcast_req.read() ) { 3557 r_init_cmd_fsm = INIT_CMD_XRAM_BRDCAST; 3558 m_cpt_inval++; 3559 } else if ( m_write_to_init_cmd_inst_fifo.rok() || 3560 r_write_to_init_cmd_multi_req.read() ) { 3561 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 3562 m_cpt_update++; 3563 } else if ( r_write_to_init_cmd_brdcast_req.read() ){ 3564 r_init_cmd_fsm = INIT_CMD_WRITE_BRDCAST; 3565 m_cpt_inval++; 3566 } 3567 break; 4565 case INIT_CMD_UPDT_NLINE: // send nline for a multi-update (from WRITE FSM) 4566 { 4567 if ( m_write_to_init_cmd_inst_fifo.rok() ) 4568 { 4569 if ( p_vci_ini.cmdack ) 4570 { 4571 m_cpt_update_mult++; 4572 r_init_cmd_fsm = INIT_CMD_UPDT_INDEX; 4573 // write_to_init_cmd_fifo_get = true; 4574 } 4575 } 4576 else 4577 { 4578 if ( r_write_to_init_cmd_multi_req.read() ) r_write_to_init_cmd_multi_req = false; 4579 r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; 4580 } 4581 break; 4582 } 4583 ///////////////////////// 4584 case INIT_CMD_UPDT_INDEX: // send word index for a multi-update (from WRITE FSM) 4585 { 4586 r_init_cmd_cpt = 0; 4587 if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_DATA; 4588 break; 3568 4589 } 3569 4590 //////////////////////// 3570 case INIT_CMD_INVAL_NLINE: // send the cache line index 3571 { 3572 if ( m_xram_rsp_to_init_cmd_inst_fifo.rok() ){ 3573 if ( p_vci_ini.cmdack ) { 3574 m_cpt_inval_mult++; 3575 r_init_cmd_fsm = INIT_CMD_INVAL_NLINE; 3576 xram_rsp_to_init_cmd_fifo_get = true; 3577 } 3578 } else { 3579 if( r_xram_rsp_to_init_cmd_multi_req.read() ){ 3580 r_xram_rsp_to_init_cmd_multi_req = false; 4591 case INIT_CMD_UPDT_DATA: // send the data for a multi-update (from WRITE FSM) 4592 { 4593 if ( p_vci_ini.cmdack ) 4594 { 4595 if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) 4596 { 4597 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 4598 write_to_init_cmd_fifo_get = true; 4599 } 4600 else 4601 { 4602 r_init_cmd_cpt = 
r_init_cmd_cpt.read() + 1; 4603 } 4604 } 4605 break; 4606 } 4607 ///////////////////////// 4608 case INIT_CMD_SC_BRDCAST: // send a broadcast-inval (from SC FSM) 4609 { 4610 if( p_vci_ini.cmdack ) 4611 { 4612 m_cpt_inval_brdcast++; 4613 r_sc_to_init_cmd_brdcast_req = false; 4614 r_init_cmd_fsm = INIT_CMD_SC_UPDT_IDLE; 4615 } 4616 break; 4617 } 4618 //////////////////////////// 4619 case INIT_CMD_SC_UPDT_NLINE: // send nline for a multi-update (from SC FSM) 4620 { 4621 if ( m_sc_to_init_cmd_inst_fifo.rok() ) 4622 { 4623 if ( p_vci_ini.cmdack ) 4624 { 4625 m_cpt_update_mult++; 4626 r_init_cmd_fsm = INIT_CMD_SC_UPDT_INDEX; 4627 } 3581 4628 } 3582 r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; 3583 } 3584 break; 4629 else 4630 { 4631 if( r_sc_to_init_cmd_multi_req.read() ) r_sc_to_init_cmd_multi_req = false; 4632 r_init_cmd_fsm = INIT_CMD_SC_UPDT_IDLE; 4633 } 4634 break; 4635 } 4636 //////////////////////////// 4637 case INIT_CMD_SC_UPDT_INDEX: // send word index for a multi-update (from SC FSM) 4638 { 4639 if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA; 4640 break; 4641 } 4642 /////////////////////////// 4643 case INIT_CMD_SC_UPDT_DATA: // send first data for a multi-update (from SC FSM) 4644 { 4645 if ( p_vci_ini.cmdack ) 4646 { 4647 if ( r_sc_to_init_cmd_is_long.read() ) 4648 { 4649 r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA_HIGH; 4650 } 4651 else 4652 { 4653 sc_to_init_cmd_fifo_get = true; 4654 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 4655 } 4656 } 4657 break; 3585 4658 } 3586 4659 //////////////////////// 3587 case INIT_CMD_XRAM_BRDCAST: // send the cache line index 3588 { 3589 if ( p_vci_ini.cmdack ) { 3590 m_cpt_inval_brdcast++; 3591 r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; 3592 r_xram_rsp_to_init_cmd_brdcast_req = false; 3593 } 3594 break; 3595 } 3596 ///////////////////////// 3597 case INIT_CMD_WRITE_BRDCAST: 3598 { 3599 if( p_vci_ini.cmdack ) { 3600 m_cpt_inval_brdcast++; 3601 r_write_to_init_cmd_brdcast_req = false; 3602 r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; 3603 } 3604 break; 3605 } 3606 ///////////////////////// 3607 case INIT_CMD_UPDT_NLINE: // send the cache line index 3608 { 3609 if ( m_write_to_init_cmd_inst_fifo.rok() ) { 3610 if ( p_vci_ini.cmdack ){ 3611 m_cpt_update_mult++; 3612 r_init_cmd_fsm = INIT_CMD_UPDT_INDEX; 3613 } 3614 } else { 3615 if ( r_write_to_init_cmd_multi_req.read() ){ 3616 r_write_to_init_cmd_multi_req = false; 3617 } 3618 r_init_cmd_fsm = INIT_CMD_UPDT_IDLE; 3619 } 3620 break; 3621 } 3622 ///////////////////////// 3623 case INIT_CMD_UPDT_INDEX: // send the first word index 3624 { 3625 r_init_cmd_cpt = 0; 3626 if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_DATA; 3627 break; 3628 } 3629 //////////////////////// 3630 case INIT_CMD_UPDT_DATA: // send the data 3631 { 3632 if ( p_vci_ini.cmdack ) { 3633 if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) { 3634 r_init_cmd_fsm = INIT_CMD_UPDT_NLINE; 3635 write_to_init_cmd_fifo_get = true; 3636 } else { 3637 r_init_cmd_cpt = r_init_cmd_cpt.read() + 1; 3638 } 3639 } 3640 break; 3641 } 3642 ///////////////////////// 3643 case INIT_CMD_SC_BRDCAST: 3644 { 3645 if( p_vci_ini.cmdack ) { 3646 m_cpt_inval_brdcast++; 3647 r_llsc_to_init_cmd_brdcast_req = false; 3648 r_init_cmd_fsm = INIT_CMD_SC_UPDT_IDLE; 3649 } 3650 break; 3651 } 3652 ///////////////////////// 3653 case INIT_CMD_SC_UPDT_NLINE: // send the cache line index 3654 { 3655 if ( m_llsc_to_init_cmd_inst_fifo.rok() ){ 3656 if ( p_vci_ini.cmdack ){ 3657 m_cpt_update_mult++; 3658 r_init_cmd_fsm = INIT_CMD_SC_UPDT_INDEX; 3659 } 3660 } else { 
3661 if( r_llsc_to_init_cmd_multi_req.read() ){ 3662 r_llsc_to_init_cmd_multi_req = false; 3663 } 3664 r_init_cmd_fsm = INIT_CMD_SC_UPDT_IDLE; 3665 } 3666 break; 3667 } 3668 ///////////////////////// 3669 case INIT_CMD_SC_UPDT_INDEX: // send the first word index 3670 { 3671 if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA; 3672 break; 3673 } 3674 //////////////////////// 3675 case INIT_CMD_SC_UPDT_DATA: // send the data 3676 { 3677 if ( p_vci_ini.cmdack ) { 3678 if(r_llsc_to_init_cmd_is_long.read()){ 3679 r_init_cmd_fsm = INIT_CMD_SC_UPDT_DATA_HIGH; 3680 } else { 3681 llsc_to_init_cmd_fifo_get = true; 4660 case INIT_CMD_SC_UPDT_DATA_HIGH: // send second data for a multi-update (from SC FSM) 4661 { 4662 if ( p_vci_ini.cmdack ) 4663 { 4664 sc_to_init_cmd_fifo_get = true; 3682 4665 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 3683 4666 } 3684 } 3685 break; 3686 } 3687 //////////////////////// 3688 case INIT_CMD_SC_UPDT_DATA_HIGH: // send the data upper 3689 { 3690 if ( p_vci_ini.cmdack ) { 3691 llsc_to_init_cmd_fifo_get = true; 3692 r_init_cmd_fsm = INIT_CMD_SC_UPDT_NLINE; 3693 } 3694 break; 3695 } 3696 3697 4667 break; 4668 } 3698 4669 } // end switch r_init_cmd_fsm 3699 4670 … … 3705 4676 // - r_read_to_tgt_rsp_req 3706 4677 // - r_write_to_tgt_rsp_req 3707 // - r_ llsc_to_tgt_rsp_req4678 // - r_sc_to_tgt_rsp_req 3708 4679 // - r_cleanup_to_tgt_rsp_req 4680 // - r_xram_rsp_to_tgt_rsp_req 3709 4681 // - r_init_rsp_to_tgt_rsp_req 3710 // - r_xram_rsp_to_tgt_rsp_req 3711 // The ordering is : read > write > llsc > cleanup > xram > init 4682 // The ordering is : read > write > sc > xram > init > cleanup 3712 4683 ///////////////////////////////////////////////////////////////////// 3713 4684 3714 switch ( r_tgt_rsp_fsm.read() ) {3715 3716 ///////////////////////3717 case TGT_RSP_READ_IDLE: // write requests have the highest priority4685 switch ( r_tgt_rsp_fsm.read() ) 4686 { 4687 /////////////////////// 4688 case TGT_RSP_READ_IDLE: // write requests have the highest priority 3718 4689 { 3719 4690 if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 3720 else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; 3721 else if ( r_xram_rsp_to_tgt_rsp_req ) { 4691 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4692 else if ( r_xram_rsp_to_tgt_rsp_req ) 4693 { 3722 4694 r_tgt_rsp_fsm = TGT_RSP_XRAM; 3723 4695 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); … … 3725 4697 else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 3726 4698 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 3727 else if ( r_read_to_tgt_rsp_req ) { 4699 else if ( r_read_to_tgt_rsp_req ) 4700 { 3728 4701 r_tgt_rsp_fsm = TGT_RSP_READ; 3729 4702 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); … … 3732 4705 } 3733 4706 //////////////////////// 3734 case TGT_RSP_WRITE_IDLE: // llsc requests have the highest priority 3735 { 3736 if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; 3737 else if ( r_xram_rsp_to_tgt_rsp_req ) { 4707 case TGT_RSP_WRITE_IDLE: // sc requests have the highest priority 4708 { 4709 if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4710 else if ( r_xram_rsp_to_tgt_rsp_req ) 4711 { 3738 4712 r_tgt_rsp_fsm = TGT_RSP_XRAM; 3739 4713 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); … … 3741 4715 else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 3742 4716 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 3743 else if ( r_read_to_tgt_rsp_req ) { 4717 else if ( r_read_to_tgt_rsp_req ) 4718 { 
3744 4719 r_tgt_rsp_fsm = TGT_RSP_READ; 3745 4720 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); … … 3750 4725 } 3751 4726 /////////////////////// 3752 case TGT_RSP_LLSC_IDLE: // cleanup requests have the highest priority 3753 { 3754 if ( r_xram_rsp_to_tgt_rsp_req ) { 4727 case TGT_RSP_SC_IDLE: // xram_rsp requests have the highest priority 4728 { 4729 if ( r_xram_rsp_to_tgt_rsp_req ) 4730 { 3755 4731 r_tgt_rsp_fsm = TGT_RSP_XRAM; 3756 4732 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); … … 3758 4734 else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 3759 4735 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 3760 else if ( r_read_to_tgt_rsp_req ) { 4736 else if ( r_read_to_tgt_rsp_req ) 4737 { 3761 4738 r_tgt_rsp_fsm = TGT_RSP_READ; 3762 4739 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 3763 4740 } 3764 4741 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 3765 else if ( r_ llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC;4742 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 3766 4743 break; 3767 4744 } 3768 case TGT_RSP_XRAM_IDLE: // init requests have the highest priority 4745 /////////////////////// 4746 case TGT_RSP_XRAM_IDLE: // init requests have the highest priority 3769 4747 { 3770 4748 3771 4749 if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; 3772 4750 else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 3773 else if ( r_read_to_tgt_rsp_req ) { 4751 else if ( r_read_to_tgt_rsp_req ) 4752 { 3774 4753 r_tgt_rsp_fsm = TGT_RSP_READ; 3775 4754 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 3776 4755 } 3777 4756 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 3778 else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; 3779 else if ( r_xram_rsp_to_tgt_rsp_req ) { 4757 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4758 else if ( r_xram_rsp_to_tgt_rsp_req ) 4759 { 3780 4760 r_tgt_rsp_fsm = TGT_RSP_XRAM; 3781 4761 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); … … 3784 4764 } 3785 4765 /////////////////////// 3786 case TGT_RSP_INIT_IDLE: // cleanup requests have the highest priority4766 case TGT_RSP_INIT_IDLE: // cleanup requests have the highest priority 3787 4767 { 3788 4768 if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 3789 else if ( r_read_to_tgt_rsp_req ) { 4769 else if ( r_read_to_tgt_rsp_req ) 4770 { 3790 4771 r_tgt_rsp_fsm = TGT_RSP_READ; 3791 4772 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 3792 4773 } 3793 4774 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 3794 else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; 3795 else if ( r_xram_rsp_to_tgt_rsp_req ) { 4775 else if ( r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC; 4776 else if ( r_xram_rsp_to_tgt_rsp_req ) 4777 { 3796 4778 r_tgt_rsp_fsm = TGT_RSP_XRAM; 3797 4779 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); … … 3801 4783 } 3802 4784 /////////////////////// 3803 case TGT_RSP_CLEANUP_IDLE: // read requests have the highest priority 3804 { 3805 if ( r_read_to_tgt_rsp_req ) { 4785 case TGT_RSP_CLEANUP_IDLE: // read requests have the highest priority 4786 { 4787 if ( r_read_to_tgt_rsp_req ) 4788 { 3806 4789 r_tgt_rsp_fsm = TGT_RSP_READ; 3807 4790 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 3808 4791 } 3809 4792 else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; 3810 else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; 3811 else if ( r_xram_rsp_to_tgt_rsp_req ) { 4793 else if ( 
r_sc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_SC;
4794 else if ( r_xram_rsp_to_tgt_rsp_req )
4795 {
3812 4796 r_tgt_rsp_fsm = TGT_RSP_XRAM;
3813 4797 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read();
… …
3817 4801 break;
3818 4802 }
4803 //////////////////
4804 case TGT_RSP_READ: // send the response to a read
4805 {
4806 if ( p_vci_tgt.rspack )
4807 {
4808
4809 #if DEBUG_MEMC_TGT_RSP
4810 if( m_debug_tgt_rsp_fsm )
4811 {
4812 std::cout << " <MEMC.TGT_RSP_READ> Read response"
4813 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read()
4814 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read()
4815 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read()
4816 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl;
4817 }
4818 #endif
4819 if ( r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1) )
4820 {
4821 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE;
4822 r_read_to_tgt_rsp_req = false;
4823 }
4824 else
4825 {
4826 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1;
4827 }
4828 }
4829 break;
4830 }
4831 ///////////////////
4832 case TGT_RSP_WRITE: // send the write acknowledge
4833 {
4834 if ( p_vci_tgt.rspack )
4835 {
4836
4837 #if DEBUG_MEMC_TGT_RSP
4838 if( m_debug_tgt_rsp_fsm )
4839 {
4840 std::cout << " <MEMC.TGT_RSP_WRITE> Write response"
4841 << " / rsrcid = " << r_write_to_tgt_rsp_srcid.read()
4842 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() << std::endl;
4843 }
4844 #endif
4845 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE;
4846 r_write_to_tgt_rsp_req = false;
4847 }
4848 break;
4849 }
4850 ///////////////////
4851 case TGT_RSP_CLEANUP: // not clear to me (AG)
4852 {
4853 if ( p_vci_tgt.rspack )
4854 {
4855
4856 #if DEBUG_MEMC_TGT_RSP
4857 if( m_debug_tgt_rsp_fsm )
4858 {
4859 std::cout << " <MEMC.TGT_RSP_CLEANUP> Cleanup response"
4860 << " / rsrcid = " << r_cleanup_to_tgt_rsp_srcid.read()
4861 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() << std::endl;
4862 }
4863 #endif
4864 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE;
4865 r_cleanup_to_tgt_rsp_req = false;
4866 }
4867 break;
4868 }
4869 //////////////////
4870 case TGT_RSP_SC: // send one atomic word response
4871 {
4872 if ( p_vci_tgt.rspack )
4873 {
4874
4875 #if DEBUG_MEMC_TGT_RSP
4876 if( m_debug_tgt_rsp_fsm )
4877 {
4878 std::cout << " <MEMC.TGT_RSP_SC> SC response"
4879 << " / rsrcid = " << r_sc_to_tgt_rsp_srcid.read()
4880 << " / rtrdid = " << r_sc_to_tgt_rsp_trdid.read() << std::endl;
4881 }
4882 #endif
4883 r_tgt_rsp_fsm = TGT_RSP_SC_IDLE;
4884 r_sc_to_tgt_rsp_req = false;
4885 }
4886 break;
4887 }
4888
3819 4889 ///////////////////////
3820 case TGT_RSP_READ: // send the response
3821 {
3822 PRINTF("* <MEM_CACHE.TGT> YURI RSP_READ : rspack %d - cpt %d on %d\n", (int)p_vci_tgt.rspack,(int)r_tgt_rsp_cpt.read(),(int)(r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1));
3823
3824 if ( p_vci_tgt.rspack ) {
3825 if ( r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1) ) {
3826 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE;
3827 r_read_to_tgt_rsp_req = false;
3828 } else {
3829 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1;
3830 }
3831 }
3832 break;
3833 }
3834 ///////////////////
3835 case TGT_RSP_WRITE: // send the write acknowledge
3836 {
3837 if ( p_vci_tgt.rspack ) {
3838 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE;
3839 r_write_to_tgt_rsp_req = false;
3840 }
3841 break;
3842 }
3843 ///////////////////
3844 case TGT_RSP_CLEANUP: // send the write acknowledge
3845 {
3846 if ( p_vci_tgt.rspack ) {
3847 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE;
3848
r_cleanup_to_tgt_rsp_req = false; 3849 } 3850 break; 4890 case TGT_RSP_XRAM: // send the response after XRAM access 4891 { 4892 if ( p_vci_tgt.rspack ) 4893 { 4894 4895 #if DEBUG_MEMC_TGT_RSP 4896 if( m_debug_tgt_rsp_fsm ) 4897 { 4898 std::cout << " <MEMC.TGT_RSP_XRAM> Response following XRAM access" 4899 << " / rsrcid = " << r_xram_rsp_to_tgt_rsp_srcid.read() 4900 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 4901 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 4902 << " / cpt = " << r_tgt_rsp_cpt.read() << std::endl; 4903 } 4904 #endif 4905 if ( (r_tgt_rsp_cpt.read() == 4906 (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1)) 4907 || r_xram_rsp_to_tgt_rsp_rerror.read() ) 4908 { 4909 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 4910 r_xram_rsp_to_tgt_rsp_req = false; 4911 } 4912 else 4913 { 4914 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; 4915 } 4916 } 4917 break; 3851 4918 } 3852 4919 ////////////////// 3853 case TGT_RSP_LLSC: // send one atomic word response 3854 { 3855 if ( p_vci_tgt.rspack ) { 3856 r_tgt_rsp_fsm = TGT_RSP_LLSC_IDLE; 3857 r_llsc_to_tgt_rsp_req = false; 3858 } 3859 break; 3860 } 3861 3862 /////////////////////// 3863 case TGT_RSP_XRAM: // send the response 3864 { 3865 if ( p_vci_tgt.rspack ) { 3866 if ( (r_tgt_rsp_cpt.read() == (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1)) 3867 || r_xram_rsp_to_tgt_rsp_rerror.read() ) { 3868 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 3869 r_xram_rsp_to_tgt_rsp_req = false; 3870 } else { 3871 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; 3872 } 3873 } 3874 break; 3875 } 3876 /////////////////// 3877 case TGT_RSP_INIT: // send the pending write acknowledge 3878 { 3879 if ( p_vci_tgt.rspack ) { 3880 r_tgt_rsp_fsm = TGT_RSP_INIT_IDLE; 3881 r_init_rsp_to_tgt_rsp_req = false; 3882 } 3883 break; 4920 case TGT_RSP_INIT: // send the write response after coherence transaction 4921 { 4922 if ( p_vci_tgt.rspack ) 4923 { 4924 4925 #if DEBUG_MEMC_TGT_RSP 4926 if( m_debug_tgt_rsp_fsm ) 4927 { 4928 std::cout << " <MEMC.TGT_RSP_INIT> Write response after coherence transaction" 4929 << " / rsrcid = " << r_init_rsp_to_tgt_rsp_srcid.read() 4930 << " / rtrdid = " << r_init_rsp_to_tgt_rsp_trdid.read() << std::endl; 4931 } 4932 #endif 4933 r_tgt_rsp_fsm = TGT_RSP_INIT_IDLE; 4934 r_init_rsp_to_tgt_rsp_req = false; 4935 } 4936 break; 3884 4937 } 3885 4938 } // end switch tgt_rsp_fsm 4939 3886 4940 //////////////////////////////////////////////////////////////////////////////////// 3887 // NEWALLOC_UPT FSM4941 // ALLOC_UPT FSM 3888 4942 //////////////////////////////////////////////////////////////////////////////////// 3889 4943 // The ALLOC_UPT FSM allocates the access to the Update/Inval Table (UPT). 
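To make the allocation discipline concrete, here is a minimal C++ model of the rotating allocation policy that all the ALLOC_* FSMs below implement (the names and the helper structure are invented for illustration; they are not part of the component): the current owner keeps the lock as long as it is still in one of its *_LOCK states, and otherwise the lock moves to the first requesting client found in cyclic order after the owner.

    #include <cstddef>

    // Hypothetical stand-in for the client FSMs competing for one resource.
    enum class Client { READ, WRITE, SC, CLEANUP, XRAM_RSP, COUNT };

    struct RotatingAllocator {
        Client owner = Client::READ;        // the resource is always allocated

        // still_using(c) : client c is still in one of its *_LOCK states
        // wants(c)       : client c currently requests the resource
        template <typename UsingFn, typename WantsFn>
        void step(UsingFn still_using, WantsFn wants) {
            if (still_using(owner)) return;                 // owner keeps the lock
            for (size_t i = 1; i < size_t(Client::COUNT); ++i) {
                Client c = Client((size_t(owner) + i) % size_t(Client::COUNT));
                if (wants(c)) { owner = c; return; }        // first requester after owner wins
            }
            // no requester this cycle: the previous owner keeps the idle resource
        }
    };

Each ALLOC_* switch below is this policy unrolled state by state, with tests on the client FSMs' *_LOCK states playing the roles of wants() and still_using().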
… …
3896 4950 /////////////////////////////////////////////////////////////////////////////////////
3897 4951
3898 switch ( r_alloc_upt_fsm.read() ) {
4952 switch ( r_alloc_upt_fsm.read() )
4953 {
3899 4954
3900 4955 ////////////////////////
… …
3907 4962 else if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP;
3908 4963 else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP;
3909 else if ((r_llsc_fsm.read() == SC_UPT_LOCK) ||
3910 (r_llsc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_LLSC;
4964 else if ((r_sc_fsm.read() == SC_UPT_LOCK) ||
4965 (r_sc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC;
3911 4966 }
3912 4967 break;
… …
3919 4974 if (r_xram_rsp_fsm.read() == XRAM_RSP_INVAL_LOCK) r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP;
3920 4975 else if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP;
3921 else if ((r_llsc_fsm.read() == SC_UPT_LOCK) ||
3922 (r_llsc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_LLSC;
4976 else if ((r_sc_fsm.read() == SC_UPT_LOCK) ||
4977 (r_sc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC;
3923 4978 else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP;
3924 4979 }
… …
3930 4985 {
3931 4986 if (r_cleanup_fsm.read() == CLEANUP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_CLEANUP;
3932 else if ((r_llsc_fsm.read() == SC_UPT_LOCK) ||
3933 (r_llsc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_LLSC;
4987 else if ((r_sc_fsm.read() == SC_UPT_LOCK) ||
4988 (r_sc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC;
3934 4989 else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP;
3935 4990 else if ((r_write_fsm.read() == WRITE_UPT_LOCK) ||
… …
3942 4997 if(r_cleanup_fsm.read() != CLEANUP_UPT_LOCK )
3943 4998 {
3944 if ((r_llsc_fsm.read() == SC_UPT_LOCK) ||
3945 (r_llsc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_LLSC;
4999 if ((r_sc_fsm.read() == SC_UPT_LOCK) ||
5000 (r_sc_fsm.read() == SC_INVAL_LOCK)) r_alloc_upt_fsm = ALLOC_UPT_SC;
3946 5001 else if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP;
3947 5002 else if ((r_write_fsm.read() == WRITE_UPT_LOCK) ||
… …
3952 5007
3953 5008 //////////////////////////
3954 case ALLOC_UPT_LLSC:
3955 if( (r_llsc_fsm.read() != SC_UPT_LOCK) &&
3956 (r_llsc_fsm.read() != SC_INVAL_LOCK))
5009 case ALLOC_UPT_SC:
5010 if( (r_sc_fsm.read() != SC_UPT_LOCK) &&
5011 (r_sc_fsm.read() != SC_INVAL_LOCK))
3957 5012 {
3958 5013 if (r_init_rsp_fsm.read() == INIT_RSP_UPT_LOCK) r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP;
… …
3971 5026 // The ALLOC_DIR FSM allocates the access to the directory and
3972 5027 // the data cache with a round robin priority between 5 user FSMs :
3973 5028 // The cyclic ordering is READ > WRITE > SC > CLEANUP > XRAM_RSP
3974 5029 // The resource is always allocated.
3975 5030 ///////////////////////////////////////////////////////////////////////////////////// 3976 5031 3977 switch ( r_alloc_dir_fsm.read() ) { 5032 switch ( r_alloc_dir_fsm.read() ) 5033 { 3978 5034 3979 5035 //////////////////// … … 3990 5046 { 3991 5047 if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 3992 else if (r_ llsc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_LLSC;5048 else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 3993 5049 else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 3994 5050 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; … … 4013 5069 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) ) ) 4014 5070 { 4015 if (r_ llsc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_LLSC;5071 if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 4016 5072 else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 4017 5073 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; … … 4021 5077 4022 5078 //////////////////// 4023 case ALLOC_DIR_LLSC:4024 if ( ((r_ llsc_fsm.read() != SC_DIR_LOCK) &&4025 (r_ llsc_fsm.read() != SC_DIR_HIT_READ ) &&4026 (r_ llsc_fsm.read() != SC_DIR_HIT_WRITE ) &&4027 (r_llsc_fsm.read() != LLSC_TRT_LOCK ) &&4028 (r_ llsc_fsm.read() != SC_TRT_LOCK) &&4029 (r_ llsc_fsm.read() != SC_INVAL_LOCK) &&4030 (r_ llsc_fsm.read() != SC_UPT_LOCK) &&4031 (r_ llsc_fsm.read() != SC_HEAP_LOCK))5079 case ALLOC_DIR_SC: 5080 if ( ((r_sc_fsm.read() != SC_DIR_LOCK) && 5081 (r_sc_fsm.read() != SC_DIR_HIT_READ ) && 5082 (r_sc_fsm.read() != SC_DIR_HIT_WRITE ) && 5083 // (r_sc_fsm.read() != SC_TRT_GET_LOCK ) && 5084 (r_sc_fsm.read() != SC_TRT_PUT_LOCK) && 5085 (r_sc_fsm.read() != SC_INVAL_LOCK) && 5086 (r_sc_fsm.read() != SC_UPT_LOCK) && 5087 (r_sc_fsm.read() != SC_HEAP_LOCK)) 4032 5088 || 4033 ( (r_ llsc_fsm.read() == SC_HEAP_LOCK) &&4034 (r_alloc_heap_fsm.read() == ALLOC_HEAP_ LLSC) )5089 ( (r_sc_fsm.read() == SC_HEAP_LOCK) && 5090 (r_alloc_heap_fsm.read() == ALLOC_HEAP_SC) ) 4035 5091 || 4036 ( (r_ llsc_fsm.read() == LLSC_TRT_LOCK ) &&4037 (r_alloc_trt_fsm.read() == ALLOC_TRT_ LLSC) ) )5092 ( (r_sc_fsm.read() == SC_TRT_GET_LOCK ) && 5093 (r_alloc_trt_fsm.read() == ALLOC_TRT_SC) ) ) 4038 5094 { 4039 5095 if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; … … 4045 5101 4046 5102 /////////////////////// 4047 case ALLOC_DIR_CLEANUP:5103 case ALLOC_DIR_CLEANUP: 4048 5104 if ( (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) && 4049 5105 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) ) … … 4052 5108 else if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; 4053 5109 else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 4054 else if (r_ llsc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_LLSC;5110 else if (r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC; 4055 5111 } 4056 5112 break; 4057 5113 //////////////////////// 4058 case ALLOC_DIR_XRAM_RSP:5114 case ALLOC_DIR_XRAM_RSP: 4059 5115 if ( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) && 4060 5116 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) && … … 4063 5119 if (r_read_fsm.read() == READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; 4064 5120 else if (r_write_fsm.read() == WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; 4065 else if (r_ llsc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_LLSC;5121 else if 
(r_sc_fsm.read() == SC_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_SC;
4066 5122 else if (r_cleanup_fsm.read() == CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP;
4067 5123 }
… …
4075 5131 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer)
4076 5132 // with a round robin priority between 4 user FSMs :
4077 // The cyclic priority is READ > WRITE > LLSC > XRAM_RSP
5133 // The cyclic priority is READ > WRITE > SC > XRAM_RSP
4078 5134 // The resource is always allocated.
4079 5135 ///////////////////////////////////////////////////////////////////////////////////
4080 5136
4081 switch (r_alloc_trt_fsm) {
5137 switch (r_alloc_trt_fsm)
5138 {
4082 5139
4083 5140 ////////////////////
… …
4087 5144 if ((r_write_fsm.read() == WRITE_TRT_LOCK) ||
4088 5145 (r_write_fsm.read() == WRITE_TRT_WRITE_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_WRITE;
4089 else if ((r_llsc_fsm.read() == LLSC_TRT_LOCK) ||
4090 (r_llsc_fsm.read() == SC_TRT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_LLSC;
5146 else if ((r_sc_fsm.read() == SC_TRT_GET_LOCK) ||
5147 (r_sc_fsm.read() == SC_TRT_PUT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_SC;
4091 5148 else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
4092 5149 else if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||
… …
4100 5157 (r_write_fsm.read() != WRITE_INVAL_LOCK))
4101 5158 {
4102 if ((r_llsc_fsm.read() == LLSC_TRT_LOCK) ||
4103 (r_llsc_fsm.read() == SC_TRT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_LLSC;
5159 if ((r_sc_fsm.read() == SC_TRT_GET_LOCK) ||
5160 (r_sc_fsm.read() == SC_TRT_PUT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_SC;
4104 5161 else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
4105 5162 else if ( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||
… …
4109 5166 break;
4110 5167 ////////////////////
4111 case ALLOC_TRT_LLSC:
4112 if ( (r_llsc_fsm.read() != LLSC_TRT_LOCK) &&
4113 (r_llsc_fsm.read() != SC_TRT_LOCK) &&
4114 (r_llsc_fsm.read() != SC_INVAL_LOCK))
5168 case ALLOC_TRT_SC:
5169 if ( (r_sc_fsm.read() != SC_TRT_GET_LOCK) &&
5170 (r_sc_fsm.read() != SC_TRT_PUT_LOCK) &&
5171 (r_sc_fsm.read() != SC_INVAL_LOCK))
4115 5172 {
4116 5173 if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
… …
4132 5189 else if ((r_write_fsm.read() == WRITE_TRT_LOCK) ||
4133 5190 (r_write_fsm.read() == WRITE_TRT_WRITE_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_WRITE;
4134 else if ((r_llsc_fsm.read() == LLSC_TRT_LOCK) ||
4135 (r_llsc_fsm.read() == SC_TRT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_LLSC;
5191 else if ((r_sc_fsm.read() == SC_TRT_GET_LOCK) ||
5192 (r_sc_fsm.read() == SC_TRT_PUT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_SC;
4136 5193 }
4137 5194 break;
… …
4143 5200 else if ((r_write_fsm.read() == WRITE_TRT_LOCK) ||
4144 5201 (r_write_fsm.read() == WRITE_TRT_WRITE_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_WRITE;
4145 else if ((r_llsc_fsm.read() == LLSC_TRT_LOCK) ||
4146 (r_llsc_fsm.read() == SC_TRT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_LLSC;
5202 else if ((r_sc_fsm.read() == SC_TRT_GET_LOCK) ||
5203 (r_sc_fsm.read() == SC_TRT_PUT_LOCK)) r_alloc_trt_fsm = ALLOC_TRT_SC;
4147 5204 else if (r_xram_rsp_fsm.read() == XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP;
4148 5205 }
… …
4156 5213 // The ALLOC_HEAP FSM allocates the access to the heap
4157 5214 // with a round robin priority between 5 user FSMs :
4158 // The cyclic ordering is READ > WRITE > LLSC > CLEANUP > XRAM_RSP
5215 // The cyclic ordering is READ > WRITE > SC > CLEANUP > XRAM_RSP
4159 5216 // The resource is always allocated.
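The heap guarded by this allocator stores, for each cache line, the list of L1 copies as singly linked entries; the SC_UPDATE / SC_UPT_NEXT traversal shown earlier walks that list with a self-pointing termination (an entry whose next field designates its own index is the last copy). A minimal sketch of that walk, with the container and names invented for illustration:

    #include <cstdint>
    #include <vector>

    struct HeapEntry { uint32_t srcid; bool inst; size_t next; };  // simplified copy descriptor

    // Visit every copy of a line, starting from the directory's head pointer.
    template <typename Visit>
    void for_each_copy(const std::vector<HeapEntry>& heap, size_t head, Visit visit)
    {
        for (size_t ptr = head; ; ptr = heap[ptr].next) {
            visit(heap[ptr].srcid, heap[ptr].inst);
            if (heap[ptr].next == ptr) break;          // self-pointing entry: last copy
        }
    }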
4160 5217 ///////////////////////////////////////////////////////////////////////////////////// 4161 5218 4162 switch ( r_alloc_heap_fsm.read() ) { 5219 switch ( r_alloc_heap_fsm.read() ) 5220 { 4163 5221 4164 5222 //////////////////// … … 4168 5226 { 4169 5227 if (r_write_fsm.read() == WRITE_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 4170 else if (r_ llsc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_LLSC;5228 else if (r_sc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_SC; 4171 5229 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 4172 5230 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_ERASE) r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; … … 4180 5238 (r_write_fsm.read() != WRITE_UPDATE) ) 4181 5239 { 4182 if (r_ llsc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_LLSC;5240 if (r_sc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_SC; 4183 5241 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 4184 5242 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_ERASE) r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; … … 4188 5246 4189 5247 //////////////////// 4190 case ALLOC_HEAP_ LLSC:4191 if ( (r_ llsc_fsm.read() != SC_HEAP_LOCK) &&4192 (r_ llsc_fsm.read() != SC_UPT_REQ ) &&4193 (r_ llsc_fsm.read() != SC_UPDATE) )5248 case ALLOC_HEAP_SC: 5249 if ( (r_sc_fsm.read() != SC_HEAP_LOCK) && 5250 (r_sc_fsm.read() != SC_UPT_REQ ) && 5251 (r_sc_fsm.read() != SC_UPT_NEXT) ) 4194 5252 { 4195 5253 if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; … … 4209 5267 else if (r_read_fsm.read() == READ_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_READ; 4210 5268 else if (r_write_fsm.read() == WRITE_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 4211 else if (r_ llsc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_LLSC;5269 else if (r_sc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_SC; 4212 5270 } 4213 5271 break; … … 4218 5276 if (r_read_fsm.read() == READ_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_READ; 4219 5277 else if (r_write_fsm.read() == WRITE_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 4220 else if (r_ llsc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_LLSC;5278 else if (r_sc_fsm.read() == SC_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_SC; 4221 5279 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_LOCK) r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 4222 5280 } … … 4287 5345 } 4288 5346 //////////////////////////////////////////////////////////////////////////////////// 4289 // TGT_CMD to LLSC FIFO5347 // TGT_CMD to SC FIFO 4290 5348 //////////////////////////////////////////////////////////////////////////////////// 4291 5349 4292 if ( cmd_ llsc_fifo_put ) {4293 if ( cmd_ llsc_fifo_get ) {4294 m_cmd_ llsc_addr_fifo.put_and_get((addr_t)(p_vci_tgt.address.read()));4295 m_cmd_ llsc_eop_fifo.put_and_get(p_vci_tgt.eop.read());4296 m_cmd_ llsc_srcid_fifo.put_and_get(p_vci_tgt.srcid.read());4297 m_cmd_ llsc_trdid_fifo.put_and_get(p_vci_tgt.trdid.read());4298 m_cmd_ llsc_pktid_fifo.put_and_get(p_vci_tgt.pktid.read());4299 m_cmd_ llsc_wdata_fifo.put_and_get(p_vci_tgt.wdata.read());5350 if ( cmd_sc_fifo_put ) { 5351 if ( cmd_sc_fifo_get ) { 5352 m_cmd_sc_addr_fifo.put_and_get((addr_t)(p_vci_tgt.address.read())); 5353 m_cmd_sc_eop_fifo.put_and_get(p_vci_tgt.eop.read()); 5354 m_cmd_sc_srcid_fifo.put_and_get(p_vci_tgt.srcid.read()); 5355 m_cmd_sc_trdid_fifo.put_and_get(p_vci_tgt.trdid.read()); 5356 
m_cmd_sc_pktid_fifo.put_and_get(p_vci_tgt.pktid.read()); 5357 m_cmd_sc_wdata_fifo.put_and_get(p_vci_tgt.wdata.read()); 4300 5358 } else { 4301 m_cmd_ llsc_addr_fifo.simple_put((addr_t)(p_vci_tgt.address.read()));4302 m_cmd_ llsc_eop_fifo.simple_put(p_vci_tgt.eop.read());4303 m_cmd_ llsc_srcid_fifo.simple_put(p_vci_tgt.srcid.read());4304 m_cmd_ llsc_trdid_fifo.simple_put(p_vci_tgt.trdid.read());4305 m_cmd_ llsc_pktid_fifo.simple_put(p_vci_tgt.pktid.read());4306 m_cmd_ llsc_wdata_fifo.simple_put(p_vci_tgt.wdata.read());5359 m_cmd_sc_addr_fifo.simple_put((addr_t)(p_vci_tgt.address.read())); 5360 m_cmd_sc_eop_fifo.simple_put(p_vci_tgt.eop.read()); 5361 m_cmd_sc_srcid_fifo.simple_put(p_vci_tgt.srcid.read()); 5362 m_cmd_sc_trdid_fifo.simple_put(p_vci_tgt.trdid.read()); 5363 m_cmd_sc_pktid_fifo.simple_put(p_vci_tgt.pktid.read()); 5364 m_cmd_sc_wdata_fifo.simple_put(p_vci_tgt.wdata.read()); 4307 5365 } 4308 5366 } else { 4309 if ( cmd_ llsc_fifo_get ) {4310 m_cmd_ llsc_addr_fifo.simple_get();4311 m_cmd_ llsc_eop_fifo.simple_get();4312 m_cmd_ llsc_srcid_fifo.simple_get();4313 m_cmd_ llsc_trdid_fifo.simple_get();4314 m_cmd_ llsc_pktid_fifo.simple_get();4315 m_cmd_ llsc_wdata_fifo.simple_get();5367 if ( cmd_sc_fifo_get ) { 5368 m_cmd_sc_addr_fifo.simple_get(); 5369 m_cmd_sc_eop_fifo.simple_get(); 5370 m_cmd_sc_srcid_fifo.simple_get(); 5371 m_cmd_sc_trdid_fifo.simple_get(); 5372 m_cmd_sc_pktid_fifo.simple_get(); 5373 m_cmd_sc_wdata_fifo.simple_get(); 4316 5374 } 4317 5375 } … … 4371 5429 } 4372 5430 //////////////////////////////////////////////////////////////////////////////////// 4373 // LLSC to INIT_CMD FIFO5431 // SC to INIT_CMD FIFO 4374 5432 //////////////////////////////////////////////////////////////////////////////////// 4375 5433 4376 if ( llsc_to_init_cmd_fifo_put ) {4377 if ( llsc_to_init_cmd_fifo_get ) {4378 m_ llsc_to_init_cmd_inst_fifo.put_and_get(llsc_to_init_cmd_fifo_inst);4379 m_ llsc_to_init_cmd_srcid_fifo.put_and_get(llsc_to_init_cmd_fifo_srcid);5434 if ( sc_to_init_cmd_fifo_put ) { 5435 if ( sc_to_init_cmd_fifo_get ) { 5436 m_sc_to_init_cmd_inst_fifo.put_and_get(sc_to_init_cmd_fifo_inst); 5437 m_sc_to_init_cmd_srcid_fifo.put_and_get(sc_to_init_cmd_fifo_srcid); 4380 5438 #if L1_MULTI_CACHE 4381 m_ llsc_to_init_cmd_cache_id_fifo.put_and_get(llsc_to_init_cmd_fifo_cache_id);5439 m_sc_to_init_cmd_cache_id_fifo.put_and_get(sc_to_init_cmd_fifo_cache_id); 4382 5440 #endif 4383 5441 } else { 4384 m_ llsc_to_init_cmd_inst_fifo.simple_put(llsc_to_init_cmd_fifo_inst);4385 m_ llsc_to_init_cmd_srcid_fifo.simple_put(llsc_to_init_cmd_fifo_srcid);5442 m_sc_to_init_cmd_inst_fifo.simple_put(sc_to_init_cmd_fifo_inst); 5443 m_sc_to_init_cmd_srcid_fifo.simple_put(sc_to_init_cmd_fifo_srcid); 4386 5444 #if L1_MULTI_CACHE 4387 m_ llsc_to_init_cmd_cache_id_fifo.simple_put(llsc_to_init_cmd_fifo_cache_id);5445 m_sc_to_init_cmd_cache_id_fifo.simple_put(sc_to_init_cmd_fifo_cache_id); 4388 5446 #endif 4389 5447 } 4390 5448 } else { 4391 if ( llsc_to_init_cmd_fifo_get ) {4392 m_ llsc_to_init_cmd_inst_fifo.simple_get();4393 m_ llsc_to_init_cmd_srcid_fifo.simple_get();5449 if ( sc_to_init_cmd_fifo_get ) { 5450 m_sc_to_init_cmd_inst_fifo.simple_get(); 5451 m_sc_to_init_cmd_srcid_fifo.simple_get(); 4394 5452 #if L1_MULTI_CACHE 4395 m_ llsc_to_init_cmd_cache_id_fifo.simple_get();5453 m_sc_to_init_cmd_cache_id_fifo.simple_get(); 4396 5454 #endif 4397 5455 } 4398 5456 } 4399 5457 4400 4401 //////////////////////////////////////////////////////////////4402 5458 m_cpt_cycles++; 4403 5459 4404 4405 4406 4407 4408 4409 
     5460  } // end transition()
     5461
     5462  /////////////////////////////
     5463  tmpl(void)::genMoore()
     5464  /////////////////////////////
     5465  {
4410 5466  ////////////////////////////////////////////////////////////
4411 5467  // Command signals on the p_vci_ixr port
… …
4430 5486  p_vci_ixr.eop = true;
4431 5487  }
4432       else if ( r_ixr_cmd_fsm.read() == IXR_CMD_LLSC_NLINE ) {
4433       if(r_llsc_to_ixr_cmd_write.read()){
     5488  else if ( r_ixr_cmd_fsm.read() == IXR_CMD_SC_NLINE ) {
     5489  if(r_sc_to_ixr_cmd_write.read()){
4434 5490  p_vci_ixr.cmd = vci_param::CMD_WRITE;
4435 5491  p_vci_ixr.cmdval = true;
4436       p_vci_ixr.address = (addr_t)((r_llsc_to_ixr_cmd_nline.read()*m_words+r_ixr_cmd_cpt.read())*4);
     5492  p_vci_ixr.address = (addr_t)((r_sc_to_ixr_cmd_nline.read()*m_words+r_ixr_cmd_cpt.read())*4);
4437 5493  p_vci_ixr.plen = m_words*4;
4438       p_vci_ixr.wdata = r_llsc_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read();
4439       p_vci_ixr.trdid = r_llsc_to_ixr_cmd_trdid.read();
     5494  p_vci_ixr.wdata = r_sc_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read();
     5495  p_vci_ixr.trdid = r_sc_to_ixr_cmd_trdid.read();
4440 5496  p_vci_ixr.eop = (r_ixr_cmd_cpt == (m_words-1));
4441 5497  } else {
4442 5498  p_vci_ixr.cmd = vci_param::CMD_READ;
4443 5499  p_vci_ixr.cmdval = true;
4444       p_vci_ixr.address = (addr_t)(r_llsc_to_ixr_cmd_nline.read()*m_words*4);
     5500  p_vci_ixr.address = (addr_t)(r_sc_to_ixr_cmd_nline.read()*m_words*4);
4445 5501  p_vci_ixr.plen = m_words*4;
4446 5502  p_vci_ixr.wdata = 0x00000000;
4447       p_vci_ixr.trdid = r_llsc_to_ixr_cmd_trdid.read();
     5503  p_vci_ixr.trdid = r_sc_to_ixr_cmd_trdid.read();
4448 5504  p_vci_ixr.eop = true;
4449 5505  }
… …
4505 5561  p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok();
4506 5562  break;
4507       case TGT_CMD_READ_EOP:
4508       p_vci_tgt.cmdack = true;
4509       break;
4510 5563  case TGT_CMD_WRITE:
4511 5564  p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok();
4512 5565  break;
4513 5566  case TGT_CMD_ATOMIC:
4514       p_vci_tgt.cmdack = m_cmd_llsc_addr_fifo.wok();
     5567  p_vci_tgt.cmdack = m_cmd_sc_addr_fifo.wok();
4515 5568  break;
4516 5569  default:
… …
4526 5579  case TGT_RSP_READ_IDLE:
4527 5580  case TGT_RSP_WRITE_IDLE:
4528       case TGT_RSP_LLSC_IDLE:
     5581  case TGT_RSP_SC_IDLE:
4529 5582  case TGT_RSP_XRAM_IDLE:
4530 5583  case TGT_RSP_INIT_IDLE:
… …
4539 5592  break;
4540 5593  case TGT_RSP_READ:
4541       PRINTF(" * <MEM_CACHE.TGT> RSP_READ : srcid %d, trdid %d, pktid %d\n"
4542       ,(uint32_t)r_read_to_tgt_rsp_srcid.read()
4543       ,(uint32_t)r_read_to_tgt_rsp_trdid.read()
4544       ,(uint32_t)r_read_to_tgt_rsp_pktid.read()
4545       );
4546
4547 5594  p_vci_tgt.rspval = true;
4548 5595  p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
… …
4554 5601  break;
4555 5602  case TGT_RSP_WRITE:
4556       PRINTF(" * <MEM_CACHE.TGT> RSP_WRITE : BURP srcid %d, trdid %d, pktid %d\n"
4557       ,(uint32_t)r_write_to_tgt_rsp_srcid.read()
4558       ,(uint32_t)r_write_to_tgt_rsp_trdid.read()
4559       ,(uint32_t)r_write_to_tgt_rsp_pktid.read()
4560       );
4561
4562 5603  p_vci_tgt.rspval = true;
4563 5604  p_vci_tgt.rdata = 0;
4564 5605  p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read();
4565 5606  p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read();
4566 5607  p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read();
4567       p_vci_tgt.rerror = 0x2 & ( (1 << vci_param::E) - 1); // Write OK
     5608  p_vci_tgt.rerror = 0x2 & ( (1 << vci_param::E) - 1);
4568 5609  p_vci_tgt.reop = true;
4569 5610  break;
4570 5611  case TGT_RSP_CLEANUP:
4571       PRINTF(" * <MEM_CACHE.TGT> RSP_CLEANUP : srcid %d, trdid %d, pktid %d\n"
4572       ,(uint32_t)r_cleanup_to_tgt_rsp_srcid.read()
4573       ,(uint32_t)r_cleanup_to_tgt_rsp_trdid.read()
4574       ,(uint32_t)r_cleanup_to_tgt_rsp_pktid.read()
4575       );
4576
4577 5612  p_vci_tgt.rspval = true;
4578 5613  p_vci_tgt.rdata = 0;
… …
4583 5618  p_vci_tgt.reop = true;
4584 5619  break;
4585       case TGT_RSP_LLSC:
4586       PRINTF(" * <MEM_CACHE.TGT> RSP_LLSC : srcid %d, trdid %d, pktid %d\n"
4587       ,(uint32_t)r_llsc_to_tgt_rsp_srcid.read()
4588       ,(uint32_t)r_llsc_to_tgt_rsp_trdid.read()
4589       ,(uint32_t)r_llsc_to_tgt_rsp_pktid.read()
4590       );
4591
     5620  case TGT_RSP_SC:
4592 5621  p_vci_tgt.rspval = true;
4593       p_vci_tgt.rdata = r_llsc_to_tgt_rsp_data.read();
4594       p_vci_tgt.rsrcid = r_llsc_to_tgt_rsp_srcid.read();
4595       p_vci_tgt.rtrdid = r_llsc_to_tgt_rsp_trdid.read();
4596       p_vci_tgt.rpktid = r_llsc_to_tgt_rsp_pktid.read();
     5622  p_vci_tgt.rdata = r_sc_to_tgt_rsp_data.read();
     5623  p_vci_tgt.rsrcid = r_sc_to_tgt_rsp_srcid.read();
     5624  p_vci_tgt.rtrdid = r_sc_to_tgt_rsp_trdid.read();
     5625  p_vci_tgt.rpktid = r_sc_to_tgt_rsp_pktid.read();
4597 5626  p_vci_tgt.rerror = 0;
4598 5627  p_vci_tgt.reop = true;
4599 5628  break;
4600 5629  case TGT_RSP_XRAM:
4601       PRINTF(" * <MEM_CACHE.TGT> RSP_XRAM : srcid %d, trdid %d, pktid %d\n"
4602       ,(uint32_t)r_xram_rsp_to_tgt_rsp_srcid.read()
4603       ,(uint32_t)r_xram_rsp_to_tgt_rsp_trdid.read()
4604       ,(uint32_t)r_xram_rsp_to_tgt_rsp_pktid.read()
4605       );
4606
4607 5630  p_vci_tgt.rspval = true;
4608 5631  p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();
… …
4611 5634  p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read();
4612 5635  p_vci_tgt.rerror = r_xram_rsp_to_tgt_rsp_rerror.read();
4613       p_vci_tgt.reop = (( r_tgt_rsp_cpt.read() == (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1))
     5636  p_vci_tgt.reop = (( r_tgt_rsp_cpt.read()
     5637  == (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length.read()-1))
4614 5638  || r_xram_rsp_to_tgt_rsp_rerror.read());
4615 5639  break;
4616 5640  case TGT_RSP_INIT:
4617       PRINTF(" * <MEM_CACHE.TGT> RSP_INIT : srcid %d, trdid %d, pktid %d\n"
4618       ,(uint32_t)r_init_rsp_to_tgt_rsp_srcid.read()
4619       ,(uint32_t)r_init_rsp_to_tgt_rsp_trdid.read()
4620       ,(uint32_t)r_init_rsp_to_tgt_rsp_pktid.read()
4621       );
4622
4623 5641  p_vci_tgt.rspval = true;
4624 5642  p_vci_tgt.rdata = 0;
… …
4659 5677  case INIT_CMD_INVAL_NLINE:
4660 5678  {
4661       PRINTF(" * <MEM_CACHE.INIT_CMD> INVAL_NLINE : trdid %d, pktid %d\n"
4662       ,(uint32_t)r_xram_rsp_to_init_cmd_trdid.read()
4663       ,(uint32_t)m_xram_rsp_to_init_cmd_cache_id_fifo.read()
4664       );
4665
4666 5679  p_vci_ini.cmdval = m_xram_rsp_to_init_cmd_inst_fifo.rok();
4667 5680  if(m_xram_rsp_to_init_cmd_inst_fifo.rok()){
… …
4753 5766  p_vci_ini.cmdval = true;
4754 5767  p_vci_ini.address = m_broadcast_address;
4755       p_vci_ini.wdata = (addr_t)r_llsc_to_init_cmd_nline.read();
4756       p_vci_ini.be = ((r_llsc_to_init_cmd_nline.read() >> 32) & 0x3);
     5768  p_vci_ini.wdata = (addr_t)r_sc_to_init_cmd_nline.read();
     5769  p_vci_ini.be = ((r_sc_to_init_cmd_nline.read() >> 32) & 0x3);
4757 5770  p_vci_ini.plen = 4 ;
4758 5771  p_vci_ini.eop = true;
4759       p_vci_ini.trdid = r_llsc_to_init_cmd_trdid.read();
     5772  p_vci_ini.trdid = r_sc_to_init_cmd_trdid.read();
4760 5773  p_vci_ini.pktid = 0;
4761 5774  break;
4762 5775  case INIT_CMD_SC_UPDT_NLINE:
4763       p_vci_ini.cmdval = m_llsc_to_init_cmd_inst_fifo.rok();
4764       if(m_llsc_to_init_cmd_inst_fifo.rok()){
4765       if( m_llsc_to_init_cmd_inst_fifo.read() ) {
4766       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 12);
     5776  p_vci_ini.cmdval = m_sc_to_init_cmd_inst_fifo.rok();
     5777  if(m_sc_to_init_cmd_inst_fifo.rok()){
     5778  if( m_sc_to_init_cmd_inst_fifo.read() ) {
     5779  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 12);
4767 5780  } else {
4768       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 8);
     5781  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 8);
4769 5782  }
4770 5783  } else {
4771 5784  p_vci_ini.address = 0;
4772 5785  }
4773       p_vci_ini.wdata = (uint32_t)r_llsc_to_init_cmd_nline.read();
4774       p_vci_ini.be = ((r_llsc_to_init_cmd_nline.read() >> 32 ) & 0x3);
4775       if(r_llsc_to_init_cmd_is_long.read()){
     5786  p_vci_ini.wdata = (uint32_t)r_sc_to_init_cmd_nline.read();
     5787  p_vci_ini.be = ((r_sc_to_init_cmd_nline.read() >> 32 ) & 0x3);
     5788  if(r_sc_to_init_cmd_is_long.read()){
4776 5789  p_vci_ini.plen = 4 * 4;
4777 5790  } else {
… …
4779 5792  }
4780 5793  p_vci_ini.eop = false;
4781       p_vci_ini.trdid = r_llsc_to_init_cmd_trdid.read();
4782       p_vci_ini.pktid = m_llsc_to_init_cmd_cache_id_fifo.read();
     5794  p_vci_ini.trdid = r_sc_to_init_cmd_trdid.read();
     5795  p_vci_ini.pktid = m_sc_to_init_cmd_cache_id_fifo.read();
4783 5796  break;
4784 5797  case INIT_CMD_SC_UPDT_INDEX:
4785 5798  p_vci_ini.cmdval = true;
4786       if( m_llsc_to_init_cmd_inst_fifo.read() ) {
4787       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 12);
     5799  if( m_sc_to_init_cmd_inst_fifo.read() ) {
     5800  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 12);
4788 5801  } else {
4789       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 8);
4790       }
4791       p_vci_ini.wdata = r_llsc_to_init_cmd_index.read();
     5802  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 8);
     5803  }
     5804  p_vci_ini.wdata = r_sc_to_init_cmd_index.read();
4792 5805  p_vci_ini.be = 0xF;
4793       if(r_llsc_to_init_cmd_is_long.read()){
     5806  if(r_sc_to_init_cmd_is_long.read()){
4794 5807  p_vci_ini.plen = 4 * 4;
4795 5808  } else {
4796 5809  p_vci_ini.plen = 4 * 3;
4797 5810  }
4798       p_vci_ini.trdid = r_llsc_to_init_cmd_trdid.read();
4799       p_vci_ini.pktid = m_llsc_to_init_cmd_cache_id_fifo.read();
     5811  p_vci_ini.trdid = r_sc_to_init_cmd_trdid.read();
     5812  p_vci_ini.pktid = m_sc_to_init_cmd_cache_id_fifo.read();
4800 5813  p_vci_ini.eop = false;
4801 5814  break;
4802 5815  case INIT_CMD_SC_UPDT_DATA:
4803 5816  p_vci_ini.cmdval = true;
4804       if( m_llsc_to_init_cmd_inst_fifo.read() ) {
4805       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 12);
     5817  if( m_sc_to_init_cmd_inst_fifo.read() ) {
     5818  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 12);
4806 5819  } else {
4807       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 8);
4808       }
4809       p_vci_ini.wdata = r_llsc_to_init_cmd_wdata.read();
     5820  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 8);
     5821  }
     5822  p_vci_ini.wdata = r_sc_to_init_cmd_wdata.read();
4810 5823  p_vci_ini.be = 0xF;
4811       p_vci_ini.trdid = r_llsc_to_init_cmd_trdid.read();
4812       p_vci_ini.pktid = m_llsc_to_init_cmd_cache_id_fifo.read();
4813       if(r_llsc_to_init_cmd_is_long.read()){
     5824  p_vci_ini.trdid = r_sc_to_init_cmd_trdid.read();
     5825  p_vci_ini.pktid = m_sc_to_init_cmd_cache_id_fifo.read();
     5826  if(r_sc_to_init_cmd_is_long.read()){
4814 5827  p_vci_ini.plen = 4 * 4;
4815 5828  p_vci_ini.eop = false;
… …
4821 5834  case INIT_CMD_SC_UPDT_DATA_HIGH:
4822 5835  p_vci_ini.cmdval = true;
4823       if( m_llsc_to_init_cmd_inst_fifo.read() ) {
4824       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 12);
     5836  if( m_sc_to_init_cmd_inst_fifo.read() ) {
     5837  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 12);
4825 5838  } else {
4826       p_vci_ini.address = (addr_t)(m_coherence_table[m_llsc_to_init_cmd_srcid_fifo.read()] + 8);
4827       }
4828       p_vci_ini.wdata = r_llsc_to_init_cmd_wdata_high.read();
     5839  p_vci_ini.address = (addr_t)(m_coherence_table[m_sc_to_init_cmd_srcid_fifo.read()] + 8);
     5840  }
     5841  p_vci_ini.wdata = r_sc_to_init_cmd_wdata_high.read();
4829 5842  p_vci_ini.be = 0xF;
4830 5843  p_vci_ini.plen = 4 * 4;
4831       p_vci_ini.trdid = r_llsc_to_init_cmd_trdid.read();
4832       p_vci_ini.pktid = m_llsc_to_init_cmd_cache_id_fifo.read();
     5844  p_vci_ini.trdid = r_sc_to_init_cmd_trdid.read();
     5845  p_vci_ini.pktid = m_sc_to_init_cmd_cache_id_fifo.read();
4833 5846  p_vci_ini.eop = true;
4834 5847  break;
… …
4863 5876  case CLEANUP_RSP:
4864 5877  {
4865       PRINTF(" * <MEM_CACHE.CLEANUP_RSP> Respons to %d.%d\n",(uint32_t)r_cleanup_srcid.read(),(uint32_t)r_cleanup_pktid.read());
4866
4867 5878  p_vci_tgt_cleanup.rspval = true;
4868 5879  p_vci_tgt_cleanup.rdata = 0;
… …
4877 5888  }
4878 5889
4879 5890  } // end genMoore()
4880 5891
4881 5892  }} // end name space
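Background on the structure patched above: the genMoore() function follows the two-process convention of SoCLib caba models. transition(), evaluated on the rising clock edge, is the only process that writes the state registers (the r_* signals), while genMoore() combinationally derives the port outputs (p_vci_ixr, p_vci_tgt, p_vci_ini, p_vci_tgt_cleanup) from those registers, so every output is a pure Moore function of the registered state. The code below is a minimal standalone sketch of that pattern, assuming a plain SystemC environment; ToyTarget and all of its ports and states are hypothetical names for illustration, not part of vci_mem_cache_v4.

#include <systemc.h>

// Toy two-process Moore FSM in the caba style: transition() computes the
// next state on the rising clock edge; genMoore() reads only the state
// registers and drives the outputs. All names here are hypothetical.
SC_MODULE(ToyTarget)
{
    sc_in<bool>  p_clk;
    sc_in<bool>  p_resetn;
    sc_in<bool>  p_cmdval;   // a command is pending on the target port
    sc_out<bool> p_cmdack;   // Moore output: a function of the state only

    enum state_e { IDLE = 0, BUSY = 1 };
    sc_signal<int> r_fsm;    // state register

    void transition()        // sequential process
    {
        if ( !p_resetn.read() ) { r_fsm = IDLE; return; }
        switch ( r_fsm.read() ) {
            case IDLE: if ( p_cmdval.read() ) r_fsm = BUSY; break;
            case BUSY: r_fsm = IDLE;                        break;
        }
    }

    void genMoore()          // combinational process
    {
        // no input port is read here: outputs depend on registers only
        p_cmdack.write( r_fsm.read() == IDLE );
    }

    SC_CTOR(ToyTarget)
    {
        SC_METHOD(transition);
        sensitive << p_clk.pos();   // state update on the rising edge
        SC_METHOD(genMoore);
        sensitive << p_clk.neg();   // outputs sampled on the falling edge
    }
};

int sc_main(int, char**)
{
    sc_clock        clk("clk", 10, SC_NS);
    sc_signal<bool> resetn, cmdval, cmdack;
    ToyTarget       dut("dut");
    dut.p_clk(clk);       dut.p_resetn(resetn);
    dut.p_cmdval(cmdval); dut.p_cmdack(cmdack);

    resetn = false; sc_start(20, SC_NS);   // apply reset
    resetn = true;  cmdval = true;
    sc_start(50, SC_NS);
    return 0;
}

Keeping every output assignment out of transition() is what makes a changeset like this one mechanical: the llsc-to-sc register renames above touch genMoore() without altering any sequential logic.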