Changeset 494 for branches/ODCCP/modules/vci_mem_cache/caba/source
- Timestamp: Aug 20, 2013, 2:13:08 PM
- Location: branches/ODCCP/modules/vci_mem_cache
- Files: 5 edited
branches/ODCCP/modules/vci_mem_cache
- Property svn:mergeinfo changed: /trunk/modules/vci_mem_cache merged: 481-483,489,491
branches/ODCCP/modules/vci_mem_cache/caba/source/include/mem_cache_directory.h
r460 r494 6 6 #include <cassert> 7 7 #include "arithmetics.h" 8 9 // !!!10 // The L1_MULTI_CACHE mechanism does no longer work with the new pktid encoding11 // of TSAR. Turning the define below to a non null value will cause the memcache12 // to behave in an unpredicted way.13 // TODO Either remove the mechanism from the mem cache or update its behaviour.14 15 #define L1_MULTI_CACHE 016 8 17 9 //#define RANDOM_EVICTION … … 46 38 bool inst; // Is the owner an ICache ? 47 39 size_t srcid; // The SRCID of the owner 48 #if L1_MULTI_CACHE49 size_t cache_id; // In multi_cache configuration50 #endif51 40 52 41 //////////////////////// 53 42 // Constructors 54 43 //////////////////////// 55 Owner(bool i_inst 56 ,size_t i_srcid 57 #if L1_MULTI_CACHE 58 ,size_t i_cache_id 59 #endif 60 ){ 44 Owner(bool i_inst, 45 size_t i_srcid) 46 { 61 47 inst = i_inst; 62 48 srcid = i_srcid; 63 #if L1_MULTI_CACHE 64 cache_id= i_cache_id; 65 #endif 66 } 67 68 Owner(const Owner &a){ 49 } 50 51 Owner(const Owner &a) 52 { 69 53 inst = a.inst; 70 54 srcid = a.srcid; 71 #if L1_MULTI_CACHE 72 cache_id= a.cache_id; 73 #endif 74 } 75 76 Owner(){ 55 } 56 57 Owner() 58 { 77 59 inst = false; 78 60 srcid = 0; 79 #if L1_MULTI_CACHE80 cache_id= 0;81 #endif82 61 } 83 62 // end constructors … … 116 95 owner.inst = 0; 117 96 owner.srcid = 0; 118 #if L1_MULTI_CACHE119 owner.cache_id= 0;120 #endif121 97 ptr = 0; 122 98 } … … 176 152 << " ; Count = " << count 177 153 << " ; Owner = " << owner.srcid 178 #if L1_MULTI_CACHE179 << "." << owner.cache_id180 #endif181 154 << " " << owner.inst 182 155 << " ; Pointer = " << ptr << std::endl; … … 327 300 // - entry : the entry value 328 301 ///////////////////////////////////////////////////////////////////// 329 void write(const size_t &set, const size_t &way, const DirectoryEntry &entry) 302 void write( const size_t &set, 303 const size_t &way, 304 const DirectoryEntry &entry) 330 305 { 331 306 assert( (set<m_sets) … … 373 348 DirectoryEntry select(const size_t &set, size_t &way) 374 349 { 375 assert( (set < m_sets)350 assert( (set < m_sets) 376 351 && "Cache Directory : (select) The set index is invalid"); 377 352 378 for(size_t i=0; i<m_ways; i++){ 379 if(!m_dir_tab[set][i].valid){ 380 way=i; 381 return DirectoryEntry(m_dir_tab[set][way]); 353 // looking for an empty slot 354 for(size_t i=0; i<m_ways; i++) 355 { 356 if( not m_dir_tab[set][i].valid ) 357 { 358 way=i; 359 return DirectoryEntry(m_dir_tab[set][way]); 360 } 382 361 } 383 }384 362 385 363 #ifdef RANDOM_EVICTION 386 lfsr = (lfsr >> 1) ^ ((-(lfsr & 1)) & 0xd0000001);387 way = lfsr % m_ways;388 return DirectoryEntry(m_dir_tab[set][way]);364 lfsr = (lfsr >> 1) ^ ((-(lfsr & 1)) & 0xd0000001); 365 way = lfsr % m_ways; 366 return DirectoryEntry(m_dir_tab[set][way]); 389 367 #endif 390 368 391 for(size_t i=0; i<m_ways; i++){ 392 if(!(m_lru_tab[set][i].recent) && !(m_dir_tab[set][i].lock)){ 393 way=i; 394 return DirectoryEntry(m_dir_tab[set][way]); 369 // looking for a not locked and not recently used entry 370 for(size_t i=0; i<m_ways; i++) 371 { 372 if((not m_lru_tab[set][i].recent) && (not m_dir_tab[set][i].lock) ) 373 { 374 way=i; 375 return DirectoryEntry(m_dir_tab[set][way]); 376 } 395 377 } 396 } 397 for(size_t i=0; i<m_ways; i++){ 398 if( !(m_lru_tab[set][i].recent) && (m_dir_tab[set][i].lock)){ 399 way=i; 400 return DirectoryEntry(m_dir_tab[set][way]); 378 379 // looking for a locked not recently used entry 380 for(size_t i=0; i<m_ways; i++) 381 { 382 if( (not m_lru_tab[set][i].recent) && (m_dir_tab[set][i].lock)) 383 { 384 way=i; 
385 return DirectoryEntry(m_dir_tab[set][way]); 386 } 401 387 } 402 } 403 for(size_t i=0; i<m_ways; i++){ 404 if( (m_lru_tab[set][i].recent) && !(m_dir_tab[set][i].lock)){ 405 way=i; 406 return DirectoryEntry(m_dir_tab[set][way]); 388 389 // looking for a recently used entry not locked 390 for(size_t i=0; i<m_ways; i++) 391 { 392 if( (m_lru_tab[set][i].recent) && (not m_dir_tab[set][i].lock)) 393 { 394 way=i; 395 return DirectoryEntry(m_dir_tab[set][way]); 396 } 407 397 } 408 } 409 way = 0; 410 return DirectoryEntry(m_dir_tab[set][0]); 398 399 // select way 0 (even if entry is locked and recently used) 400 way = 0; 401 return DirectoryEntry(m_dir_tab[set][0]); 411 402 } // end select() 412 403 … … 442 433 //////////////////////// 443 434 HeapEntry() 444 :owner(false,0 445 #if L1_MULTI_CACHE 446 ,0 447 #endif 448 ) 435 :owner(false,0) 449 436 { 450 437 next = 0; … … 454 441 // Constructor 455 442 //////////////////////// 456 HeapEntry(const HeapEntry &entry){ 443 HeapEntry(const HeapEntry &entry) 444 { 457 445 owner.inst = entry.owner.inst; 458 446 owner.srcid = entry.owner.srcid; 459 #if L1_MULTI_CACHE460 owner.cache_id = entry.owner.cache_id;461 #endif462 447 next = entry.next; 463 448 } // end constructor … … 466 451 // The copy() function copies an existing source entry to a target 467 452 ///////////////////////////////////////////////////////////////////// 468 void copy(const HeapEntry &entry){ 453 void copy(const HeapEntry &entry) 454 { 469 455 owner.inst = entry.owner.inst; 470 456 owner.srcid = entry.owner.srcid; 471 #if L1_MULTI_CACHE472 owner.cache_id = entry.owner.cache_id;473 #endif474 457 next = entry.next; 475 458 } // end copy() … … 482 465 << " -- owner.inst : " << std::dec << owner.inst << std::endl 483 466 << " -- owner.srcid : " << std::dec << owner.srcid << std::endl 484 #if L1_MULTI_CACHE485 << " -- owner.cache_id : " << std::dec << owner.cache_id << std::endl486 #endif487 467 << " -- next : " << std::dec << next << std::endl; 488 468 … … 645 625 // Cache Data 646 626 //////////////////////////////////////////////////////////////////////// 647 class CacheData { 627 class CacheData 628 { 648 629 private: 649 630 const uint32_t m_sets; … … 655 636 public: 656 637 638 /////////////////////////////////////////////////////// 657 639 CacheData(uint32_t ways, uint32_t sets, uint32_t words) 658 : m_sets(sets), m_ways(ways), m_words(words) {659 640 : m_sets(sets), m_ways(ways), m_words(words) 641 { 660 642 m_cache_data = new uint32_t ** [ways]; 661 for ( size_t i=0 ; i < ways ; i++ ) { 662 m_cache_data[i] = new uint32_t * [sets]; 643 for ( size_t i=0 ; i < ways ; i++ ) 644 { 645 m_cache_data[i] = new uint32_t * [sets]; 663 646 } 664 for ( size_t i=0; i<ways; i++ ) { 665 for ( size_t j=0; j<sets; j++ ) { 666 m_cache_data[i][j] = new uint32_t [words]; 667 } 647 for ( size_t i=0; i<ways; i++ ) 648 { 649 for ( size_t j=0; j<sets; j++ ) 650 { 651 m_cache_data[i][j] = new uint32_t [words]; 652 } 668 653 } 669 } 670 671 ~CacheData() { 672 for(size_t i=0; i<m_ways ; i++){ 673 for(size_t j=0; j<m_sets ; j++){ 654 } 655 //////////// 656 ~CacheData() 657 { 658 for(size_t i=0; i<m_ways ; i++) 659 { 660 for(size_t j=0; j<m_sets ; j++) 661 { 674 662 delete [] m_cache_data[i][j]; 675 663 } 676 664 } 677 for(size_t i=0; i<m_ways ; i++){ 665 for(size_t i=0; i<m_ways ; i++) 666 { 678 667 delete [] m_cache_data[i]; 679 668 } 680 669 delete [] m_cache_data; 681 670 } 682 683 uint32_t read ( 684 const uint32_t &way, 685 const uint32_t &set, 686 const uint32_t &word) const { 687 688 assert((set < 
m_sets ) && "Cache data error: Trying to read a wrong set" ); 689 assert((way < m_ways ) && "Cache data error: Trying to read a wrong way" ); 690 assert((word < m_words) && "Cache data error: Trying to read a wrong word"); 691 692 return m_cache_data[way][set][word]; 693 } 694 695 void read_line( 696 const uint32_t &way, 697 const uint32_t &set, 698 sc_core::sc_signal<uint32_t> * cache_line) 699 { 700 assert((set < m_sets ) && "Cache data error: Trying to read a wrong set" ); 701 assert((way < m_ways ) && "Cache data error: Trying to read a wrong way" ); 702 703 for (uint32_t word=0; word<m_words; word++) 704 cache_line[word].write(m_cache_data[way][set][word]); 705 } 706 707 void write ( 708 const uint32_t &way, 709 const uint32_t &set, 710 const uint32_t &word, 711 const uint32_t &data, 712 const uint32_t &be = 0xF) { 713 714 assert((set < m_sets ) && "Cache data error: Trying to write a wrong set" ); 715 assert((way < m_ways ) && "Cache data error: Trying to write a wrong way" ); 716 assert((word < m_words) && "Cache data error: Trying to write a wrong word"); 717 assert((be <= 0xF ) && "Cache data error: Trying to write a wrong word cell"); 718 719 if (be == 0x0) return; 720 721 if (be == 0xF) { 722 m_cache_data[way][set][word] = data; 723 return; 724 } 725 726 uint32_t mask = 0; 727 if (be & 0x1) mask = mask | 0x000000FF; 728 if (be & 0x2) mask = mask | 0x0000FF00; 729 if (be & 0x4) mask = mask | 0x00FF0000; 730 if (be & 0x8) mask = mask | 0xFF000000; 731 732 m_cache_data[way][set][word] = 733 (data & mask) | (m_cache_data[way][set][word] & ~mask); 671 ////////////////////////////////////////// 672 uint32_t read ( const uint32_t &way, 673 const uint32_t &set, 674 const uint32_t &word) const 675 { 676 assert((set < m_sets ) && "Cache data error: Trying to read a wrong set" ); 677 assert((way < m_ways ) && "Cache data error: Trying to read a wrong way" ); 678 assert((word < m_words) && "Cache data error: Trying to read a wrong word"); 679 680 return m_cache_data[way][set][word]; 681 } 682 ////////////////////////////////////////// 683 void read_line( const uint32_t &way, 684 const uint32_t &set, 685 sc_core::sc_signal<uint32_t> * cache_line) 686 { 687 assert((set < m_sets ) && "Cache data error: Trying to read a wrong set" ); 688 assert((way < m_ways ) && "Cache data error: Trying to read a wrong way" ); 689 690 for (uint32_t word=0; word<m_words; word++) 691 cache_line[word].write(m_cache_data[way][set][word]); 692 } 693 ///////////////////////////////////////// 694 void write ( const uint32_t &way, 695 const uint32_t &set, 696 const uint32_t &word, 697 const uint32_t &data, 698 const uint32_t &be = 0xF) 699 { 700 701 assert((set < m_sets ) && "Cache data error: Trying to write a wrong set" ); 702 assert((way < m_ways ) && "Cache data error: Trying to write a wrong way" ); 703 assert((word < m_words) && "Cache data error: Trying to write a wrong word"); 704 assert((be <= 0xF ) && "Cache data error: Trying to write a wrong be"); 705 706 if (be == 0x0) return; 707 708 if (be == 0xF) 709 { 710 m_cache_data[way][set][word] = data; 711 return; 712 } 713 714 uint32_t mask = 0; 715 if (be & 0x1) mask = mask | 0x000000FF; 716 if (be & 0x2) mask = mask | 0x0000FF00; 717 if (be & 0x4) mask = mask | 0x00FF0000; 718 if (be & 0x8) mask = mask | 0xFF000000; 719 720 m_cache_data[way][set][word] = 721 (data & mask) | (m_cache_data[way][set][word] & ~mask); 734 722 } 735 723 }; // end class CacheData -
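Note: the byte-enable handling in CacheData::write() above builds a 32-bit mask from the 4-bit BE field and merges the incoming data into the stored word, so that only the enabled bytes are modified. Below is a minimal standalone sketch of that masking step, wrapped in a be_to_mask() helper as in the transaction table code; the values used in main() are illustrative only.

#include <cassert>
#include <cstdint>

// build the 32-bit write mask from a 4-bit byte enable (one bit per byte)
static uint32_t be_to_mask(uint32_t be)
{
    uint32_t mask = 0;
    if (be & 0x1) mask |= 0x000000FF;
    if (be & 0x2) mask |= 0x0000FF00;
    if (be & 0x4) mask |= 0x00FF0000;
    if (be & 0x8) mask |= 0xFF000000;
    return mask;
}

int main()
{
    uint32_t word = 0x11223344;   // current content of the cache word
    uint32_t data = 0xAABBCCDD;   // incoming write data
    uint32_t be   = 0x6;          // only bytes 1 and 2 are enabled

    uint32_t mask = be_to_mask(be);
    word = (data & mask) | (word & ~mask);   // same merge as in CacheData::write()

    assert(word == 0x11BBCC44);   // non-enabled bytes keep their old value
    return 0;
}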
branches/ODCCP/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
r479 r494 25 25 * SOCLIB_LGPL_HEADER_END 26 26 * 27 * Maintainers: alain eric.guthmuller@polytechnique.edu 27 * Maintainers: alain.greiner@lip6.fr 28 * eric.guthmuller@polytechnique.edu 28 29 * cesar.fuguet-tortolero@lip6.fr 29 30 * alexandre.joannou@lip6.fr … … 150 151 MULTI_ACK_UPT_LOCK, 151 152 MULTI_ACK_UPT_CLEAR, 152 MULTI_ACK_WRITE_RSP, 153 MULTI_ACK_CONFIG_ACK 153 MULTI_ACK_WRITE_RSP 154 154 }; 155 155 … … 159 159 CONFIG_IDLE, 160 160 CONFIG_LOOP, 161 CONFIG_WAIT, 161 162 CONFIG_RSP, 162 163 CONFIG_DIR_REQ, 163 164 CONFIG_DIR_ACCESS, 164 CONFIG_ DIR_IVT_LOCK,165 CONFIG_IVT_LOCK, 165 166 CONFIG_BC_SEND, 166 CONFIG_BC_WAIT, 167 CONFIG_INV_SEND, 167 CONFIG_INVAL_SEND, 168 168 CONFIG_HEAP_REQ, 169 169 CONFIG_HEAP_SCAN, 170 170 CONFIG_HEAP_LAST, 171 CONFIG_INV_WAIT 171 CONFIG_TRT_LOCK, 172 CONFIG_TRT_SET, 173 CONFIG_PUT_REQ 172 174 }; 173 175 … … 197 199 WRITE_DIR_REQ, 198 200 WRITE_DIR_LOCK, 199 WRITE_DIR_READ,200 201 WRITE_DIR_HIT, 201 202 WRITE_UPT_LOCK, … … 209 210 WRITE_MISS_TRT_SET, 210 211 WRITE_MISS_XRAM_REQ, 212 WRITE_BC_DIR_READ, 211 213 WRITE_BC_TRT_LOCK, 212 214 WRITE_BC_IVT_LOCK, … … 235 237 XRAM_RSP_DIR_UPDT, 236 238 XRAM_RSP_DIR_RSP, 237 XRAM_RSP_I NVAL_LOCK,239 XRAM_RSP_IVT_LOCK, 238 240 XRAM_RSP_INVAL_WAIT, 239 241 XRAM_RSP_INVAL, … … 254 256 IXR_CMD_XRAM_IDLE, 255 257 IXR_CMD_CLEANUP_IDLE, 256 IXR_CMD_TRT_LOCK, 257 IXR_CMD_READ, 258 IXR_CMD_WRITE, 259 IXR_CMD_CAS, 260 IXR_CMD_XRAM, 261 IXR_CMD_CLEANUP_DATA 258 IXR_CMD_CONFIG_IDLE, 259 IXR_CMD_READ_TRT, 260 IXR_CMD_WRITE_TRT, 261 IXR_CMD_CAS_TRT, 262 IXR_CMD_XRAM_TRT, 263 IXR_CMD_CLEANUP_TRT, 264 IXR_CMD_CONFIG_TRT, 265 IXR_CMD_READ_SEND, 266 IXR_CMD_WRITE_SEND, 267 IXR_CMD_CAS_SEND, 268 IXR_CMD_XRAM_SEND, 269 IXR_CMD_CLEANUP_DATA_SEND, 270 IXR_CMD_CONFIG_SEND 262 271 }; 263 272 … … 306 315 CLEANUP_IVT_CLEAR, 307 316 CLEANUP_WRITE_RSP, 308 CLEANUP_CONFIG_ACK,309 317 CLEANUP_IXR_REQ, 310 318 CLEANUP_WAIT, … … 333 341 ALLOC_TRT_IXR_RSP, 334 342 ALLOC_TRT_CLEANUP, 335 ALLOC_TRT_IXR_CMD 343 ALLOC_TRT_IXR_CMD, 344 ALLOC_TRT_CONFIG 336 345 }; 337 346 … … 394 403 }; 395 404 396 /* Configuration commands */ 397 enum cmd_config_type_e 398 { 399 CMD_CONFIG_INVAL = 0, 400 CMD_CONFIG_SYNC = 1 401 }; 402 403 // debug variables (for each FSM) 405 // debug variables 404 406 bool m_debug; 405 407 bool m_debug_previous_valid; 406 408 size_t m_debug_previous_count; 407 409 bool m_debug_previous_dirty; 408 sc_signal<data_t>* m_debug_previous_data; 409 sc_signal<data_t>* m_debug_data; 410 411 bool m_monitor_ok; 412 addr_t m_monitor_base; 413 addr_t m_monitor_length; 410 data_t * m_debug_previous_data; 411 data_t * m_debug_data; 414 412 415 413 // instrumentation counters … … 619 617 uint32_t m_broadcast_boundaries; 620 618 621 //////////////////////////////////////////////////622 // Registers controlled by the TGT_CMD fsm623 //////////////////////////////////////////////////624 625 sc_signal<int> r_tgt_cmd_fsm;626 627 619 // Fifo between TGT_CMD fsm and READ fsm 628 620 GenericFifo<addr_t> m_cmd_read_addr_fifo; … … 668 660 sc_signal<size_t> r_tgt_cmd_config_cmd; 669 661 662 ////////////////////////////////////////////////// 663 // Registers controlled by the TGT_CMD fsm 664 ////////////////////////////////////////////////// 665 666 sc_signal<int> r_tgt_cmd_fsm; 667 sc_signal<size_t> r_tgt_cmd_srcid; // srcid for response to config 668 sc_signal<size_t> r_tgt_cmd_trdid; // trdid for response to config 669 sc_signal<size_t> r_tgt_cmd_pktid; // pktid for response to config 670 670 671 /////////////////////////////////////////////////////// 671 
672 // Registers controlled by the CONFIG fsm 672 673 /////////////////////////////////////////////////////// 673 674 674 sc_signal<int> r_config_fsm; // FSM state 675 sc_signal<bool> r_config_lock; // lock protecting exclusive access 676 sc_signal<int> r_config_cmd; // config request status 677 sc_signal<addr_t> r_config_address; // target buffer physical address 678 sc_signal<size_t> r_config_srcid; // config request srcid 679 sc_signal<size_t> r_config_trdid; // config request trdid 680 sc_signal<size_t> r_config_pktid; // config request pktid 681 sc_signal<size_t> r_config_nlines; // number of lines covering the buffer 682 sc_signal<size_t> r_config_dir_way; // DIR: selected way 683 sc_signal<size_t> r_config_dir_count; // DIR: number of copies 684 sc_signal<bool> r_config_dir_is_cnt; // DIR: counter mode (broadcast required) 685 sc_signal<size_t> r_config_dir_copy_srcid; // DIR: first copy SRCID 686 sc_signal<bool> r_config_dir_copy_inst; // DIR: first copy L1 type 687 sc_signal<size_t> r_config_dir_next_ptr; // DIR: index of next copy in HEAP 688 sc_signal<size_t> r_config_heap_next; // current pointer to scan HEAP 689 690 sc_signal<size_t> r_config_ivt_index; // IVT index 675 sc_signal<int> r_config_fsm; // FSM state 676 sc_signal<bool> r_config_lock; // lock protecting exclusive access 677 sc_signal<int> r_config_cmd; // config request type 678 sc_signal<addr_t> r_config_address; // target buffer physical address 679 sc_signal<size_t> r_config_srcid; // config request srcid 680 sc_signal<size_t> r_config_trdid; // config request trdid 681 sc_signal<size_t> r_config_pktid; // config request pktid 682 sc_signal<size_t> r_config_cmd_lines; // number of lines to be handled 683 sc_signal<size_t> r_config_rsp_lines; // number of lines not completed 684 sc_signal<size_t> r_config_dir_way; // DIR: selected way 685 sc_signal<bool> r_config_dir_lock; // DIR: locked entry 686 sc_signal<size_t> r_config_dir_count; // DIR: number of copies 687 sc_signal<bool> r_config_dir_is_cnt; // DIR: counter mode (broadcast) 688 sc_signal<size_t> r_config_dir_copy_srcid; // DIR: first copy SRCID 689 sc_signal<bool> r_config_dir_copy_inst; // DIR: first copy L1 type 690 sc_signal<size_t> r_config_dir_ptr; // DIR: index of next copy in HEAP 691 sc_signal<size_t> r_config_heap_next; // current pointer to scan HEAP 692 sc_signal<size_t> r_config_trt_index; // selected entry in TRT 693 sc_signal<size_t> r_config_ivt_index; // selected entry in IVT 694 695 // Buffer between CONFIG fsm and IXR_CMD fsm 696 sc_signal<bool> r_config_to_ixr_cmd_req; // valid request 697 sc_signal<size_t> r_config_to_ixr_cmd_index; // TRT index 698 691 699 692 700 // Buffer between CONFIG fsm and TGT_RSP fsm (send a done response to L1 cache) … … 705 713 GenericFifo<size_t> m_config_to_cc_send_srcid_fifo; // fifo for owners srcid 706 714 707 #if L1_MULTI_CACHE708 GenericFifo<size_t> m_config_to_cc_send_cache_id_fifo; // fifo for cache_id709 #endif710 711 715 /////////////////////////////////////////////////////// 712 716 // Registers controlled by the READ fsm 713 717 /////////////////////////////////////////////////////// 714 718 715 sc_signal<int> r_read_fsm; // FSM state 716 sc_signal<size_t> r_read_copy; // Srcid of the first copy 717 sc_signal<size_t> r_read_copy_cache; // Srcid of the first copy 718 sc_signal<bool> r_read_copy_inst; // Type of the first copy 719 sc_signal<tag_t> r_read_tag; // cache line tag (in directory) 720 sc_signal<bool> r_read_is_cnt; // is_cnt bit (in directory) 721 sc_signal<bool> r_read_lock; // lock bit 
(in directory) 722 sc_signal<bool> r_read_dirty; // dirty bit (in directory) 723 sc_signal<size_t> r_read_count; // number of copies 724 sc_signal<size_t> r_read_ptr; // pointer to the heap 725 sc_signal<data_t> * r_read_data; // data (one cache line) 726 sc_signal<size_t> r_read_way; // associative way (in cache) 727 sc_signal<size_t> r_read_trt_index; // Transaction Table index 728 sc_signal<size_t> r_read_next_ptr; // Next entry to point to 729 sc_signal<bool> r_read_last_free; // Last free entry 730 sc_signal<addr_t> r_read_ll_key; // LL key from the llsc_global_table 731 732 // Buffer between READ fsm and IXR_CMD fsm (ask a missing cache line to XRAM) 733 sc_signal<bool> r_read_to_ixr_cmd_req; // valid request 734 sc_signal<addr_t> r_read_to_ixr_cmd_nline; // cache line index 735 sc_signal<size_t> r_read_to_ixr_cmd_trdid; // index in Transaction Table 719 sc_signal<int> r_read_fsm; // FSM state 720 sc_signal<size_t> r_read_copy; // Srcid of the first copy 721 sc_signal<size_t> r_read_copy_cache; // Srcid of the first copy 722 sc_signal<bool> r_read_copy_inst; // Type of the first copy 723 sc_signal<tag_t> r_read_tag; // cache line tag (in directory) 724 sc_signal<bool> r_read_is_cnt; // is_cnt bit (in directory) 725 sc_signal<bool> r_read_lock; // lock bit (in directory) 726 sc_signal<bool> r_read_dirty; // dirty bit (in directory) 727 sc_signal<size_t> r_read_count; // number of copies 728 sc_signal<size_t> r_read_ptr; // pointer to the heap 729 sc_signal<data_t> * r_read_data; // data (one cache line) 730 sc_signal<size_t> r_read_way; // associative way (in cache) 731 sc_signal<size_t> r_read_trt_index; // Transaction Table index 732 sc_signal<size_t> r_read_next_ptr; // Next entry to point to 733 sc_signal<bool> r_read_last_free; // Last free entry 734 sc_signal<addr_t> r_read_ll_key; // LL key from llsc_global_table 735 736 // Buffer between READ fsm and IXR_CMD fsm 737 sc_signal<bool> r_read_to_ixr_cmd_req; // valid request 738 sc_signal<size_t> r_read_to_ixr_cmd_index; // TRT index 736 739 737 740 // Buffer between READ fsm and TGT_RSP fsm (send a hit read response to L1 cache) 738 sc_signal<bool> r_read_to_tgt_rsp_req; // valid request739 sc_signal<size_t> r_read_to_tgt_rsp_srcid; // Transaction srcid740 sc_signal<size_t> r_read_to_tgt_rsp_trdid; // Transaction trdid741 sc_signal<size_t> r_read_to_tgt_rsp_pktid; // Transaction pktid742 sc_signal<data_t> * r_read_to_tgt_rsp_data; // data (one cache line)743 sc_signal<size_t> r_read_to_tgt_rsp_word; // first word of the response744 sc_signal<size_t> r_read_to_tgt_rsp_length; // length of the response745 sc_signal<addr_t> r_read_to_tgt_rsp_ll_key; // LL key from thellsc_global_table741 sc_signal<bool> r_read_to_tgt_rsp_req; // valid request 742 sc_signal<size_t> r_read_to_tgt_rsp_srcid; // Transaction srcid 743 sc_signal<size_t> r_read_to_tgt_rsp_trdid; // Transaction trdid 744 sc_signal<size_t> r_read_to_tgt_rsp_pktid; // Transaction pktid 745 sc_signal<data_t> * r_read_to_tgt_rsp_data; // data (one cache line) 746 sc_signal<size_t> r_read_to_tgt_rsp_word; // first word of the response 747 sc_signal<size_t> r_read_to_tgt_rsp_length; // length of the response 748 sc_signal<addr_t> r_read_to_tgt_rsp_ll_key; // LL key from llsc_global_table 746 749 747 750 /////////////////////////////////////////////////////////////// … … 749 752 /////////////////////////////////////////////////////////////// 750 753 751 sc_signal<int> r_write_fsm; // FSM state752 sc_signal<addr_t> r_write_address; // first word address753 sc_signal<size_t> 
r_write_word_index; // first word index in line754 sc_signal<size_t> r_write_word_count; // number of words in line755 sc_signal<size_t> r_write_srcid; // transaction srcid756 sc_signal<size_t> r_write_trdid; // transaction trdid757 sc_signal<size_t> r_write_pktid; // transaction pktid758 sc_signal<data_t> * r_write_data; // data (one cache line)759 sc_signal<be_t> * r_write_be; // one byte enable per word760 sc_signal<bool> r_write_byte; // (BE != 0X0) and (BE != 0xF)761 sc_signal<bool> r_write_is_cnt; // is_cnt bit (in directory)762 sc_signal<bool> r_write_lock; // lock bit (in directory)763 sc_signal<tag_t> r_write_tag; // cache line tag (in directory)764 sc_signal<size_t> r_write_copy; // first owner of the line765 sc_signal<size_t> r_write_copy_cache; // first owner of the line766 sc_signal<bool> r_write_copy_inst; // is this owner a ICache ?767 sc_signal<size_t> r_write_count; // number of copies768 sc_signal<size_t> r_write_ptr; // pointer to the heap769 sc_signal<size_t> r_write_next_ptr; // next pointer to the heap770 sc_signal<bool> r_write_to_dec; // need to decrement update counter771 sc_signal<size_t> r_write_way; // way of the line772 sc_signal<size_t> r_write_trt_index; // index in Transaction Table773 sc_signal<size_t> r_write_upt_index; // index in Update Table774 sc_signal<bool> r_write_sc_fail; // sc command failed775 sc_signal<bool> r_write_pending_sc; // sc command pending754 sc_signal<int> r_write_fsm; // FSM state 755 sc_signal<addr_t> r_write_address; // first word address 756 sc_signal<size_t> r_write_word_index; // first word index in line 757 sc_signal<size_t> r_write_word_count; // number of words in line 758 sc_signal<size_t> r_write_srcid; // transaction srcid 759 sc_signal<size_t> r_write_trdid; // transaction trdid 760 sc_signal<size_t> r_write_pktid; // transaction pktid 761 sc_signal<data_t> * r_write_data; // data (one cache line) 762 sc_signal<be_t> * r_write_be; // one byte enable per word 763 sc_signal<bool> r_write_byte; // (BE != 0X0) and (BE != 0xF) 764 sc_signal<bool> r_write_is_cnt; // is_cnt bit (in directory) 765 sc_signal<bool> r_write_lock; // lock bit (in directory) 766 sc_signal<tag_t> r_write_tag; // cache line tag (in directory) 767 sc_signal<size_t> r_write_copy; // first owner of the line 768 sc_signal<size_t> r_write_copy_cache; // first owner of the line 769 sc_signal<bool> r_write_copy_inst; // is this owner a ICache ? 
770 sc_signal<size_t> r_write_count; // number of copies 771 sc_signal<size_t> r_write_ptr; // pointer to the heap 772 sc_signal<size_t> r_write_next_ptr; // next pointer to the heap 773 sc_signal<bool> r_write_to_dec; // need to decrement update counter 774 sc_signal<size_t> r_write_way; // way of the line 775 sc_signal<size_t> r_write_trt_index; // index in Transaction Table 776 sc_signal<size_t> r_write_upt_index; // index in Update Table 777 sc_signal<bool> r_write_sc_fail; // sc command failed 778 sc_signal<bool> r_write_pending_sc; // sc command pending 776 779 777 780 // Buffer between WRITE fsm and TGT_RSP fsm (acknowledge a write command from L1) … … 782 785 sc_signal<bool> r_write_to_tgt_rsp_sc_fail; // sc command failed 783 786 784 // Buffer between WRITE fsm and IXR_CMD fsm (ask a missing cache line to XRAM) 785 sc_signal<bool> r_write_to_ixr_cmd_req; // valid request 786 sc_signal<bool> r_write_to_ixr_cmd_write; // write request 787 sc_signal<addr_t> r_write_to_ixr_cmd_nline; // cache line index 788 sc_signal<data_t> * r_write_to_ixr_cmd_data; // cache line data 789 sc_signal<size_t> r_write_to_ixr_cmd_trdid; // index in Transaction Table 787 // Buffer between WRITE fsm and IXR_CMD fsm 788 sc_signal<bool> r_write_to_ixr_cmd_req; // valid request 789 sc_signal<bool> r_write_to_ixr_cmd_put; // request type (GET/PUT) 790 sc_signal<size_t> r_write_to_ixr_cmd_index; // TRT index 790 791 791 792 // Buffer between WRITE fsm and CC_SEND fsm (Update/Invalidate L1 caches) … … 801 802 GenericFifo<size_t> m_write_to_cc_send_srcid_fifo; // fifo for srcids 802 803 803 #if L1_MULTI_CACHE804 GenericFifo<size_t> m_write_to_cc_send_cache_id_fifo; // fifo for srcids805 #endif806 807 804 // Buffer between WRITE fsm and MULTI_ACK fsm (Decrement UPT entry) 808 805 sc_signal<bool> r_write_to_multi_ack_req; // valid request … … 820 817 sc_signal<addr_t> r_multi_ack_nline; // pending write nline 821 818 822 // signaling completion of multi-inval to CONFIG fsm823 sc_signal<bool> r_multi_ack_to_config_ack;824 825 819 // Buffer between MULTI_ACK fsm and TGT_RSP fsm (complete write/update transaction) 826 820 sc_signal<bool> r_multi_ack_to_tgt_rsp_req; // valid request … … 839 833 sc_signal<addr_t> r_cleanup_nline; // cache line index 840 834 841 #if L1_MULTI_CACHE842 sc_signal<size_t> r_cleanup_pktid; // transaction pktid843 #endif844 835 845 836 sc_signal<copy_t> r_cleanup_copy; // first copy … … 868 859 sc_signal<size_t> r_cleanup_index; // index of the INVAL line (in the UPT) 869 860 870 // signaling completion of broadcast-inval to CONFIG fsm871 sc_signal<bool> r_cleanup_to_config_ack;872 873 861 // Buffer between CLEANUP fsm and TGT_RSP fsm (acknowledge a write command from L1) 874 862 sc_signal<bool> r_cleanup_to_tgt_rsp_req; // valid request … … 881 869 /////////////////////////////////////////////////////// 882 870 883 sc_signal<int> r_cas_fsm; // FSM state884 sc_signal<data_t> r_cas_wdata; // write data word885 sc_signal<data_t> * r_cas_rdata; // read data word886 sc_signal<uint32_t> r_cas_lfsr; // lfsr for random introducing887 sc_signal<size_t> r_cas_cpt; // size of command888 sc_signal<copy_t> r_cas_copy; // Srcid of the first copy889 sc_signal<copy_t> r_cas_copy_cache; // Srcid of the first copy890 sc_signal<bool> r_cas_copy_inst; // Type of the first copy891 sc_signal<size_t> r_cas_count; // number of copies892 sc_signal<size_t> r_cas_ptr; // pointer to the heap893 sc_signal<size_t> r_cas_next_ptr; // next pointer to the heap894 sc_signal<bool> r_cas_is_cnt; // is_cnt bit (in directory)895 
sc_signal<bool> r_cas_dirty; // dirty bit (in directory)896 sc_signal<size_t> r_cas_way; // way in directory897 sc_signal<size_t> r_cas_set; // set in directory898 sc_signal<data_t> r_cas_tag; // cache line tag (in directory)899 sc_signal<size_t> r_cas_trt_index; // Transaction Table index900 sc_signal<size_t> r_cas_upt_index; // Update Table index901 sc_signal<data_t> * r_cas_data; // cache line data902 903 // Buffer between CAS fsm and IXR_CMD fsm (XRAM write)871 sc_signal<int> r_cas_fsm; // FSM state 872 sc_signal<data_t> r_cas_wdata; // write data word 873 sc_signal<data_t> * r_cas_rdata; // read data word 874 sc_signal<uint32_t> r_cas_lfsr; // lfsr for random introducing 875 sc_signal<size_t> r_cas_cpt; // size of command 876 sc_signal<copy_t> r_cas_copy; // Srcid of the first copy 877 sc_signal<copy_t> r_cas_copy_cache; // Srcid of the first copy 878 sc_signal<bool> r_cas_copy_inst; // Type of the first copy 879 sc_signal<size_t> r_cas_count; // number of copies 880 sc_signal<size_t> r_cas_ptr; // pointer to the heap 881 sc_signal<size_t> r_cas_next_ptr; // next pointer to the heap 882 sc_signal<bool> r_cas_is_cnt; // is_cnt bit (in directory) 883 sc_signal<bool> r_cas_dirty; // dirty bit (in directory) 884 sc_signal<size_t> r_cas_way; // way in directory 885 sc_signal<size_t> r_cas_set; // set in directory 886 sc_signal<data_t> r_cas_tag; // cache line tag (in directory) 887 sc_signal<size_t> r_cas_trt_index; // Transaction Table index 888 sc_signal<size_t> r_cas_upt_index; // Update Table index 889 sc_signal<data_t> * r_cas_data; // cache line data 890 891 // Buffer between CAS fsm and IXR_CMD fsm 904 892 sc_signal<bool> r_cas_to_ixr_cmd_req; // valid request 905 sc_signal<addr_t> r_cas_to_ixr_cmd_nline; // cache line index 906 sc_signal<size_t> r_cas_to_ixr_cmd_trdid; // index in Transaction Table 907 sc_signal<bool> r_cas_to_ixr_cmd_write; // write request 908 sc_signal<data_t> * r_cas_to_ixr_cmd_data; // cache line data 909 893 sc_signal<bool> r_cas_to_ixr_cmd_put; // request type (GET/PUT) 894 sc_signal<size_t> r_cas_to_ixr_cmd_index; // TRT index 910 895 911 896 // Buffer between CAS fsm and TGT_RSP fsm … … 928 913 GenericFifo<size_t> m_cas_to_cc_send_srcid_fifo; // fifo for srcids 929 914 930 #if L1_MULTI_CACHE931 GenericFifo<size_t> m_cas_to_cc_send_cache_id_fifo; // fifo for srcids932 #endif933 934 915 //////////////////////////////////////////////////// 935 916 // Registers controlled by the IXR_RSP fsm 936 917 //////////////////////////////////////////////////// 937 918 938 sc_signal<int> r_ixr_rsp_fsm; // FSM state 939 sc_signal<size_t> r_ixr_rsp_trt_index; // TRT entry index 940 sc_signal<size_t> r_ixr_rsp_cpt; // word counter 919 sc_signal<int> r_ixr_rsp_fsm; // FSM state 920 sc_signal<size_t> r_ixr_rsp_trt_index; // TRT entry index 921 sc_signal<size_t> r_ixr_rsp_cpt; // word counter 922 923 // Buffer between IXR_RSP fsm and CONFIG fsm (response from the XRAM) 924 sc_signal<bool> r_ixr_rsp_to_config_ack; // one single bit 941 925 942 926 // Buffer between IXR_RSP fsm and XRAM_RSP fsm (response from the XRAM) 943 sc_signal<bool> * r_ixr_rsp_to_xram_rsp_rok; // A xram response is ready927 sc_signal<bool> * r_ixr_rsp_to_xram_rsp_rok; // one bit per TRT entry 944 928 sc_signal<bool> * r_ixr_rsp_to_xram_rsp_no_coherent; // A xram response is ready and no coherent (ODCCP) 945 929 … … 986 970 GenericFifo<size_t> m_xram_rsp_to_cc_send_srcid_fifo; // fifo for srcids 987 971 988 #if L1_MULTI_CACHE 989 GenericFifo<size_t> m_xram_rsp_to_cc_send_cache_id_fifo; // fifo for srcids 
990 #endif 991 992 // Buffer between XRAM_RSP fsm and IXR_CMD fsm (XRAM write) 972 // Buffer between XRAM_RSP fsm and IXR_CMD fsm 993 973 sc_signal<bool> r_xram_rsp_to_ixr_cmd_req; // Valid request 994 sc_signal<addr_t> r_xram_rsp_to_ixr_cmd_nline; // cache line index 995 sc_signal<data_t> * r_xram_rsp_to_ixr_cmd_data; // cache line data 996 sc_signal<size_t> r_xram_rsp_to_ixr_cmd_trdid; // index in transaction table 974 sc_signal<size_t> r_xram_rsp_to_ixr_cmd_index; // TRT index 997 975 998 976 //////////////////////////////////////////////////// … … 1001 979 1002 980 sc_signal<int> r_ixr_cmd_fsm; 1003 sc_signal<size_t> r_ixr_cmd_cpt; 981 sc_signal<size_t> r_ixr_cmd_word; // word index for a put 982 sc_signal<size_t> r_ixr_cmd_trdid; // TRT index value 983 sc_signal<addr_t> r_ixr_cmd_address; // address to XRAM 984 sc_signal<data_t> * r_ixr_cmd_wdata; // cache line buffer 985 sc_signal<bool> r_ixr_cmd_get; // transaction type (PUT/GET) 1004 986 1005 987 //////////////////////////////////////////////////// … … 1076 1058 sc_signal<uint32_t> r_cleanup_to_ixr_cmd_srcid; 1077 1059 sc_signal<bool> r_cleanup_to_ixr_cmd_l1_dirty_ncc; // this cleanup was dirty in L1 1078 sc_signal<uint32_t> r_cleanup_to_ixr_cmd_ trdid;1060 sc_signal<uint32_t> r_cleanup_to_ixr_cmd_index; 1079 1061 sc_signal<uint32_t> r_cleanup_to_ixr_cmd_pktid; 1080 1062 sc_signal<addr_t> r_cleanup_to_ixr_cmd_nline; -
branches/ODCCP/modules/vci_mem_cache/caba/source/include/xram_transaction.h
r460 r494 34 34 bool rerror; // error returned by xram 35 35 data_t ll_key; // LL key returned by the llsc_global_table 36 bool config; // transaction required by CONFIG FSM 36 37 37 38 ///////////////////////////////////////////////////////////////////// … … 42 43 valid = false; 43 44 rerror = false; 45 config = false; 44 46 } 45 47 … … 80 82 rerror = source.rerror; 81 83 ll_key = source.ll_key; 84 config = source.config; 82 85 } 83 86 … … 87 90 void print() 88 91 { 92 std::cout << "------- TRT entry -------" << std::endl; 89 93 std::cout << "valid = " << valid << std::endl; 90 94 std::cout << "xram_read = " << xram_read << std::endl; … … 96 100 std::cout << "read_length = " << read_length << std::endl; 97 101 std::cout << "word_index = " << word_index << std::endl; 98 for(size_t i=0; i<wdata_be.size() ; i++){ 99 std::cout << "wdata_be [" << i <<"] = " << wdata_be[i] << std::endl; 100 } 101 for(size_t i=0; i<wdata.size() ; i++){ 102 std::cout << "wdata [" << i <<"] = " << wdata[i] << std::endl; 103 } 102 for(size_t i=0; i<wdata_be.size() ; i++) 103 { 104 std::cout << "wdata_be[" << std::dec << i << "] = " 105 << std::hex << wdata_be[i] << std::endl; 106 } 107 for(size_t i=0; i<wdata.size() ; i++) 108 { 109 std::cout << "wdata[" << std::dec << i << "] = " 110 << std::hex << wdata[i] << std::endl; 111 } 112 std::cout << "rerror = " << rerror << std::endl; 113 std::cout << "ll_key = " << ll_key << std::endl; 114 std::cout << "config = " << config << std::endl; 104 115 std::cout << std::endl; 105 std::cout << "rerror = " << rerror << std::endl;106 116 } 107 117 … … 114 124 wdata_be.clear(); 115 125 wdata.clear(); 116 valid=false; 117 rerror=false; 118 } 119 120 TransactionTabEntry(const TransactionTabEntry &source){ 126 valid = false; 127 rerror = false; 128 config = false; 129 } 130 131 TransactionTabEntry(const TransactionTabEntry &source) 132 { 121 133 valid = source.valid; 122 134 xram_read = source.xram_read; … … 132 144 rerror = source.rerror; 133 145 ll_key = source.ll_key; 146 config = source.config; 134 147 } 135 148 … … 197 210 delete [] tab; 198 211 } 199 200 212 ///////////////////////////////////////////////////////////////////// 201 213 // The size() function returns the size of the tab … … 205 217 return size_tab; 206 218 } 207 208 219 ///////////////////////////////////////////////////////////////////// 209 220 // The init() function initializes the transaction tab entries … … 211 222 void init() 212 223 { 213 for ( size_t i=0; i<size_tab; i++) { 224 for ( size_t i=0; i<size_tab; i++) 225 { 214 226 tab[i].init(); 215 227 } 216 228 } 217 218 229 ///////////////////////////////////////////////////////////////////// 219 230 // The print() function prints a transaction tab entry … … 223 234 void print(const size_t index) 224 235 { 225 assert( (index < size_tab) 226 && "Invalid Transaction Tab Entry"); 236 assert( (index < size_tab) and 237 "MEMC ERROR: The selected entry is out of range in TRT write_data_mask()"); 238 227 239 tab[index].print(); 228 240 return; 229 241 } 230 231 242 ///////////////////////////////////////////////////////////////////// 232 243 // The read() function returns a transaction tab entry. 
… … 236 247 TransactionTabEntry read(const size_t index) 237 248 { 238 assert( (index < size_tab) 239 && "Invalid Transaction Tab Entry"); 249 assert( (index < size_tab) and 250 "MEMC ERROR: Invalid Transaction Tab Entry"); 251 240 252 return tab[index]; 241 253 } 242 243 254 ///////////////////////////////////////////////////////////////////// 244 255 // The full() function returns the state of the transaction tab … … 249 260 bool full(size_t &index) 250 261 { 251 for(size_t i=0; i<size_tab; i++){ 252 if(!tab[i].valid){ 262 for(size_t i=0; i<size_tab; i++) 263 { 264 if(!tab[i].valid) 265 { 253 266 index=i; 254 267 return false; … … 257 270 return true; 258 271 } 259 260 272 ///////////////////////////////////////////////////////////////////// 261 273 // The hit_read() function checks if an XRAM read transaction exists … … 268 280 bool hit_read(const addr_t nline,size_t &index) 269 281 { 270 for(size_t i=0; i<size_tab; i++){ 271 if((tab[i].valid && (nline==tab[i].nline)) && (tab[i].xram_read)) { 282 for(size_t i=0; i<size_tab; i++) 283 { 284 if((tab[i].valid && (nline==tab[i].nline)) && (tab[i].xram_read)) 285 { 272 286 index=i; 273 287 return true; … … 276 290 return false; 277 291 } 278 279 292 /////////////////////////////////////////////////////////////////////// 280 293 // The hit_write() function looks if an XRAM write transaction exists … … 286 299 bool hit_write(const addr_t nline) 287 300 { 288 for(size_t i=0; i<size_tab; i++){ 289 if(tab[i].valid && (nline==tab[i].nline) && !(tab[i].xram_read)) { 301 for(size_t i=0; i<size_tab; i++) 302 { 303 if(tab[i].valid && (nline==tab[i].nline) && !(tab[i].xram_read)) 304 { 290 305 return true; 291 306 } … … 325 340 const std::vector<data_t> &data) 326 341 { 327 assert( (index < size_tab) 328 && "Invalid Transaction Tab Entry"); 329 assert(be.size()==tab[index].wdata_be.size() 330 && "Bad data mask in write_data_mask in TransactionTab"); 331 assert(data.size()==tab[index].wdata.size() 332 && "Bad data in write_data_mask in TransactionTab"); 333 334 for(size_t i=0; i<tab[index].wdata_be.size() ; i++) { 342 assert( (index < size_tab) and 343 "MEMC ERROR: The selected entry is out of range in TRT write_data_mask()"); 344 345 assert( (be.size()==tab[index].wdata_be.size()) and 346 "MEMC ERROR: Bad be size in TRT write_data_mask()"); 347 348 assert( (data.size()==tab[index].wdata.size()) and 349 "MEMC ERROR: Bad data size in TRT write_data_mask()"); 350 351 for(size_t i=0; i<tab[index].wdata_be.size() ; i++) 352 { 335 353 tab[index].wdata_be[i] = tab[index].wdata_be[i] | be[i]; 336 354 data_t mask = be_to_mask(be[i]); … … 338 356 } 339 357 } 340 341 358 ///////////////////////////////////////////////////////////////////// 342 359 // The set() function registers a transaction (read or write) … … 355 372 // - data_be : the mask of the data to write (in case of write) 356 373 // - ll_key : the ll key (if any) returned by the llsc_global_table 374 // - config : transaction required by config FSM 357 375 ///////////////////////////////////////////////////////////////////// 358 376 void set(const size_t index, … … 367 385 const std::vector<be_t> &data_be, 368 386 const std::vector<data_t> &data, 369 const data_t ll_key = 0) 370 { 371 assert( (index < size_tab) 372 && "The selected entry is out of range in set() Transaction Tab"); 373 assert(data_be.size()==tab[index].wdata_be.size() 374 && "Bad data_be argument in set() TransactionTab"); 375 assert(data.size()==tab[index].wdata.size() 376 && "Bad data argument in set() TransactionTab"); 387 const 
data_t ll_key = 0, 388 const bool config = false) 389 { 390 assert( (index < size_tab) and 391 "MEMC ERROR: The selected entry is out of range in TRT set()"); 392 393 assert( (data_be.size()==tab[index].wdata_be.size()) and 394 "MEMC ERROR: Bad data_be argument in TRT set()"); 395 396 assert( (data.size()==tab[index].wdata.size()) and 397 "MEMC ERROR: Bad data argument in TRT set()"); 377 398 378 399 tab[index].valid = true; … … 386 407 tab[index].word_index = word_index; 387 408 tab[index].ll_key = ll_key; 409 tab[index].config = config; 388 410 for(size_t i=0; i<tab[index].wdata.size(); i++) 389 411 { … … 398 420 // The BE field in TRT is taken into account. 399 421 // Arguments : 400 // - index : the index of the transaction in the transaction tab 401 // - word_index : the index of the data in the line 402 // - data : a 64 bits value 403 // - error : invalid data 422 // - index : index of the entry in TRT 423 // - word : index of the 32 bits word in the line 424 // - data : 64 bits value (first data right) 404 425 ///////////////////////////////////////////////////////////////////// 405 426 void write_rsp(const size_t index, 406 427 const size_t word, 407 const wide_data_t data, 408 const bool rerror) 428 const wide_data_t data) 409 429 { 410 430 data_t value; 411 431 data_t mask; 412 432 413 if ( index >= size_tab ) 414 { 415 std::cout << "VCI_MEM_CACHE ERRROR " << tab_name 416 << " TRT entry out of range in write_rsp()" << std::endl; 417 exit(0); 418 } 419 if ( word > tab[index].wdata_be.size() ) 420 { 421 std::cout << "VCI_MEM_CACHE ERRROR " << tab_name 422 << " Bad word_index in write_rsp() in TRT" << std::endl; 423 exit(0); 424 } 425 if ( not tab[index].valid ) 426 { 427 std::cout << "VCI_MEM_CACHE ERRROR " << tab_name 428 << " TRT Entry invalid in write_rsp()" << std::endl; 429 exit(0); 430 } 431 if ( not tab[index].xram_read ) 432 { 433 std::cout << "VCI_MEM_CACHE ERRROR " << tab_name 434 << " TRT entry is not an XRAM GET in write_rsp()" << std::endl; 435 exit(0); 436 } 433 assert( (index < size_tab) and 434 "MEMC ERROR: The selected entry is out of range in TRT write_rsp()"); 435 436 assert( (word < tab[index].wdata_be.size()) and 437 "MEMC ERROR: Bad word index in TRT write_rsp()"); 438 439 assert( (tab[index].valid) and 440 "MEMC ERROR: TRT entry not valid in TRT write_rsp()"); 441 442 assert( (tab[index].xram_read ) and 443 "MEMC ERROR: TRT entry is not a GET in TRT write_rsp()"); 437 444 438 445 // first 32 bits word … … 445 452 mask = be_to_mask(tab[index].wdata_be[word+1]); 446 453 tab[index].wdata[word+1] = (tab[index].wdata[word+1] & mask) | (value & ~mask); 447 448 // error update 449 tab[index].rerror |= rerror; 450 } 451 454 } 452 455 ///////////////////////////////////////////////////////////////////// 453 456 // The erase() function erases an entry in the transaction tab. … … 457 460 void erase(const size_t index) 458 461 { 459 assert( (index < size_tab) 460 && "The selected entry is out of range in erase() Transaction Tab"); 462 assert( (index < size_tab) and 463 "MEMC ERROR: The selected entry is out of range in TRT erase()"); 464 461 465 tab[index].valid = false; 462 466 tab[index].rerror = false; 467 } 468 ///////////////////////////////////////////////////////////////////// 469 // The is_config() function returns the config flag value. 
470 // Arguments : 471 // - index : the index of the entry in the transaction tab 472 ///////////////////////////////////////////////////////////////////// 473 bool is_config(const size_t index) 474 { 475 assert( (index < size_tab) and 476 "MEMC ERROR: The selected entry is out of range in TRT is_config()"); 477 478 return tab[index].config; 463 479 } 464 480 }; // end class TransactionTab -
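Note: the write_rsp() method above splits each 64-bit XRAM response flit into two 32-bit words and, using the byte enables already recorded in wdata_be, keeps the locally written bytes and fills only the remaining bytes with the XRAM data. A small standalone illustration of that merge, assuming the same be_to_mask() convention as the TRT code; the sample values are illustrative only.

#include <cassert>
#include <cstdint>

typedef uint32_t data_t;
typedef uint64_t wide_data_t;

static data_t be_to_mask(uint32_t be)
{
    data_t mask = 0;
    if (be & 0x1) mask |= 0x000000FF;
    if (be & 0x2) mask |= 0x0000FF00;
    if (be & 0x4) mask |= 0x00FF0000;
    if (be & 0x8) mask |= 0xFF000000;
    return mask;
}

int main()
{
    wide_data_t rsp = 0xCAFEBABE12345678ULL;       // one 64-bit XRAM response flit

    data_t   wdata[2]    = { 0x000000AA, 0 };      // data already written locally
    uint32_t wdata_be[2] = { 0x1, 0x0 };           // byte 0 of the first word was written

    // first 32 bits word : keep locally written bytes, take the rest from the XRAM
    data_t mask = be_to_mask(wdata_be[0]);
    wdata[0] = (wdata[0] & mask) | ((data_t)rsp & ~mask);

    // second 32 bits word
    mask = be_to_mask(wdata_be[1]);
    wdata[1] = (wdata[1] & mask) | ((data_t)(rsp >> 32) & ~mask);

    assert(wdata[0] == 0x123456AA);
    assert(wdata[1] == 0xCAFEBABE);
    return 0;
}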
branches/ODCCP/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
r492 r494 45 45 #define DEBUG_MEMC_WRITE 1 // detailed trace of WRITE FSM 46 46 #define DEBUG_MEMC_CAS 1 // detailed trace of CAS FSM 47 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_ RSPFSM47 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_CMD FSM 48 48 #define DEBUG_MEMC_IXR_RSP 1 // detailed trace of IXR_RSP FSM 49 49 #define DEBUG_MEMC_XRAM_RSP 1 // detailed trace of XRAM_RSP FSM … … 127 127 "MULTI_ACK_UPT_LOCK", 128 128 "MULTI_ACK_UPT_CLEAR", 129 "MULTI_ACK_WRITE_RSP", 130 "MULTI_ACK_CONFIG_ACK" 129 "MULTI_ACK_WRITE_RSP" 131 130 }; 132 131 const char *config_fsm_str[] = … … 134 133 "CONFIG_IDLE", 135 134 "CONFIG_LOOP", 135 "CONFIG_WAIT", 136 136 "CONFIG_RSP", 137 137 "CONFIG_DIR_REQ", 138 138 "CONFIG_DIR_ACCESS", 139 "CONFIG_ DIR_IVT_LOCK",139 "CONFIG_IVT_LOCK", 140 140 "CONFIG_BC_SEND", 141 "CONFIG_BC_WAIT", 142 "CONFIG_INV_SEND", 141 "CONFIG_INVAL_SEND", 143 142 "CONFIG_HEAP_REQ", 144 143 "CONFIG_HEAP_SCAN", 145 144 "CONFIG_HEAP_LAST", 146 "CONFIG_INV_WAIT" 145 "CONFIG_TRT_LOCK", 146 "CONFIG_TRT_SET", 147 "CONFIG_PUT_REQ" 147 148 }; 148 149 const char *read_fsm_str[] = … … 168 169 "WRITE_DIR_REQ", 169 170 "WRITE_DIR_LOCK", 170 "WRITE_DIR_READ",171 171 "WRITE_DIR_HIT", 172 172 "WRITE_UPT_LOCK", … … 180 180 "WRITE_MISS_TRT_SET", 181 181 "WRITE_MISS_XRAM_REQ", 182 "WRITE_BC_DIR_READ", 182 183 "WRITE_BC_TRT_LOCK", 183 184 "WRITE_BC_IVT_LOCK", … … 202 203 "XRAM_RSP_DIR_UPDT", 203 204 "XRAM_RSP_DIR_RSP", 204 "XRAM_RSP_I NVAL_LOCK",205 "XRAM_RSP_IVT_LOCK", 205 206 "XRAM_RSP_INVAL_WAIT", 206 207 "XRAM_RSP_INVAL", … … 219 220 "IXR_CMD_XRAM_IDLE", 220 221 "IXR_CMD_CLEANUP_IDLE", 221 "IXR_CMD_TRT_LOCK", 222 "IXR_CMD_READ", 223 "IXR_CMD_WRITE", 224 "IXR_CMD_CAS", 225 "IXR_CMD_XRAM", 226 "IXR_CMD_CLEANUP_DATA" 222 "IXR_CMD_CONFIG_IDLE", 223 "IXR_CMD_READ_TRT", 224 "IXR_CMD_WRITE_TRT", 225 "IXR_CMD_CAS_TRT", 226 "IXR_CMD_XRAM_TRT", 227 "IXR_CMD_CLEANUP_TRT", 228 "IXR_CMD_CONFIG_TRT", 229 "IXR_CMD_READ_SEND", 230 "IXR_CMD_WRITE_SEND", 231 "IXR_CMD_CAS_SEND", 232 "IXR_CMD_XRAM_SEND", 233 "IXR_CMD_CLEANUP_DATA_SEND", 234 "IXR_CMD_CONFIG_SEND" 227 235 }; 228 236 const char *cas_fsm_str[] = … … 267 275 "CLEANUP_IVT_CLEAR", 268 276 "CLEANUP_WRITE_RSP", 269 "CLEANUP_CONFIG_ACK",270 277 "CLEANUP_IXR_REQ", 271 278 "CLEANUP_WAIT", … … 290 297 "ALLOC_TRT_IXR_RSP", 291 298 "ALLOC_TRT_CLEANUP", 292 "ALLOC_TRT_IXR_CMD" 299 "ALLOC_TRT_IXR_CMD", 300 "ALLOC_TRT_CONFIG" 293 301 }; 294 302 const char *alloc_upt_fsm_str[] = 295 303 { 296 "ALLOC_UPT_CONFIG",297 304 "ALLOC_UPT_WRITE", 298 305 "ALLOC_UPT_CAS", … … 351 358 : soclib::caba::BaseModule(name), 352 359 353 m_monitor_ok(false),360 //m_monitor_ok(false), 354 361 355 362 p_clk( "p_clk" ), … … 394 401 m_broadcast_boundaries(0x7C1F), 395 402 396 r_tgt_cmd_fsm("r_tgt_cmd_fsm"),397 403 398 404 // FIFOs … … 421 427 m_cc_receive_to_multi_ack_fifo("m_cc_receive_to_multi_ack_fifo", 4), 422 428 429 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), 430 423 431 r_config_fsm( "r_config_fsm" ), 424 432 … … 432 440 m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo",8), 433 441 m_write_to_cc_send_srcid_fifo("m_write_to_cc_send_srcid_fifo",8), 434 #if L1_MULTI_CACHE435 m_write_to_cc_send_cache_id_fifo("m_write_to_cc_send_cache_id_fifo",8),436 #endif437 442 438 443 r_multi_ack_fsm("r_multi_ack_fsm"), … … 444 449 m_cas_to_cc_send_inst_fifo("m_cas_to_cc_send_inst_fifo",8), 445 450 m_cas_to_cc_send_srcid_fifo("m_cas_to_cc_send_srcid_fifo",8), 446 #if L1_MULTI_CACHE447 m_cas_to_cc_send_cache_id_fifo("m_cas_to_cc_send_cache_id_fifo",8),448 #endif449 451 450 452 
r_ixr_rsp_fsm("r_ixr_rsp_fsm"), … … 453 455 m_xram_rsp_to_cc_send_inst_fifo("m_xram_rsp_to_cc_send_inst_fifo",8), 454 456 m_xram_rsp_to_cc_send_srcid_fifo("m_xram_rsp_to_cc_send_srcid_fifo",8), 455 #if L1_MULTI_CACHE456 m_xram_rsp_to_cc_send_cache_id_fifo("m_xram_rsp_to_cc_send_cache_id_fifo",8),457 #endif458 457 459 458 r_ixr_cmd_fsm("r_ixr_cmd_fsm"), … … 524 523 r_xram_rsp_victim_data = new sc_signal<data_t>[nwords]; 525 524 r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 526 r_xram_rsp_to_ixr_cmd_data = new sc_signal<data_t>[nwords];527 525 528 526 // Allocation for READ FSM … … 535 533 r_write_to_cc_send_data = new sc_signal<data_t>[nwords]; 536 534 r_write_to_cc_send_be = new sc_signal<be_t>[nwords]; 537 r_write_to_ixr_cmd_data = new sc_signal<data_t>[nwords];538 535 539 536 // Allocation for CAS FSM 540 r_cas_to_ixr_cmd_data = new sc_signal<data_t>[nwords];541 537 r_cas_data = new sc_signal<data_t>[nwords]; 542 538 r_cas_rdata = new sc_signal<data_t>[2]; … … 544 540 // Allocation for ODCCP 545 541 r_cleanup_data = new sc_signal<data_t>[nwords]; 546 r_ixr_cmd_data = new sc_signal<data_t>[nwords];547 542 r_cleanup_to_ixr_cmd_data = new sc_signal<data_t>[nwords]; 548 543 544 // Allocation for IXR_CMD FSM 545 r_ixr_cmd_wdata = new sc_signal<data_t>[nwords]; 546 549 547 // Allocation for debug 550 m_debug_previous_data = new sc_signal<data_t>[nwords];551 m_debug_data = new sc_signal<data_t>[nwords];548 m_debug_previous_data = new data_t[nwords]; 549 m_debug_data = new data_t[nwords]; 552 550 553 551 SC_METHOD(transition); … … 560 558 } // end constructor 561 559 562 ///////////////////////////////////////////////////////////////////////563 tmpl(void) ::start_monitor(addr_t addr, addr_t length)564 ///////////////////////////////////////////////////////////////////////565 {566 m_monitor_ok = true;567 m_monitor_base = addr;568 m_monitor_length = length;569 }570 571 ///////////////////////////////////////////////////////////////////////572 tmpl(void) ::stop_monitor()573 ///////////////////////////////////////////////////////////////////////574 {575 m_monitor_ok = false;576 }577 578 ////////////////////////////////////////////////579 tmpl(void) ::check_monitor( addr_t addr,580 data_t data,581 bool read )582 ////////////////////////////////////////////////583 {584 if((addr >= m_monitor_base) and585 (addr < m_monitor_base + m_monitor_length))586 {587 if ( read ) std::cout << " Monitor MEMC Read ";588 else std::cout << " Monitor MEMC Write";589 std::cout << " / Address = " << std::hex << addr590 << " / Data = " << data591 << " at cycle " << std::dec << m_cpt_cycles << std::endl;592 }593 }594 560 595 561 ///////////////////////////////////////////////////// … … 601 567 DirectoryEntry entry = m_cache_directory.read_neutral(addr, &way, &set ); 602 568 569 // read data and compute data_change 603 570 bool data_change = false; 604 605 571 if ( entry.valid ) 606 572 { 607 m_cache_data.read_line( way, set, m_debug_data );608 609 for ( size_t i = 0 ; i<m_words ; i++ )610 {611 if ( m_debug_previous_valid and612 (m_debug_data[i].read() != m_debug_previous_data[i].read()) )613 614 m_debug_previous_data[i] = m_debug_data[i].read();573 for ( size_t word = 0 ; word<m_words ; word++ ) 574 { 575 m_debug_data[word] = m_cache_data.read(way, set, word); 576 if ( m_debug_previous_valid and 577 (m_debug_data[word] != m_debug_previous_data[word]) ) 578 { 579 data_change = true; 580 } 615 581 } 616 582 } 617 583 584 // print values if any change 618 585 if ( (entry.valid != m_debug_previous_valid) or 619 
586 (entry.valid and (entry.count != m_debug_previous_count)) or … … 623 590 << " at cycle " << std::dec << m_cpt_cycles 624 591 << " for address " << std::hex << addr 625 << " / HIT= " << std::dec << entry.valid592 << " / VAL = " << std::dec << entry.valid 626 593 << " / WAY = " << way 627 594 << " / COUNT = " << entry.count 628 595 << " / DIRTY = " << entry.dirty 629 << " / DATA_CHANGE = " << entry.count596 << " / DATA_CHANGE = " << data_change 630 597 << std::endl; 631 } 598 std::cout << std::hex << " /0:" << m_debug_data[0] 599 << "/1:" << m_debug_data[1] 600 << "/2:" << m_debug_data[2] 601 << "/3:" << m_debug_data[3] 602 << "/4:" << m_debug_data[4] 603 << "/5:" << m_debug_data[5] 604 << "/6:" << m_debug_data[6] 605 << "/7:" << m_debug_data[7] 606 << "/8:" << m_debug_data[8] 607 << "/9:" << m_debug_data[9] 608 << "/A:" << m_debug_data[10] 609 << "/B:" << m_debug_data[11] 610 << "/C:" << m_debug_data[12] 611 << "/D:" << m_debug_data[13] 612 << "/E:" << m_debug_data[14] 613 << "/F:" << m_debug_data[15] 614 << std::endl; 615 } 616 617 // register values 632 618 m_debug_previous_count = entry.count; 633 619 m_debug_previous_valid = entry.valid; 634 620 m_debug_previous_dirty = entry.dirty; 621 for( size_t word=0 ; word<m_words ; word++ ) 622 m_debug_previous_data[word] = m_debug_data[word]; 635 623 } 636 624 … … 807 795 delete [] r_xram_rsp_victim_data; 808 796 delete [] r_xram_rsp_to_tgt_rsp_data; 809 delete [] r_xram_rsp_to_ixr_cmd_data;810 797 811 798 delete [] r_read_data; … … 889 876 m_config_to_cc_send_inst_fifo.init(); 890 877 m_config_to_cc_send_srcid_fifo.init(); 891 #if L1_MULTI_CACHE892 m_config_to_cc_send_cache_id_fifo.init();893 #endif894 878 895 879 r_tgt_cmd_to_tgt_rsp_req = false; … … 906 890 m_write_to_cc_send_inst_fifo.init(); 907 891 m_write_to_cc_send_srcid_fifo.init(); 908 #if L1_MULTI_CACHE909 m_write_to_cc_send_cache_id_fifo.init();910 #endif911 892 912 893 r_cleanup_to_tgt_rsp_req = false; … … 914 895 m_cc_receive_to_cleanup_fifo.init(); 915 896 916 r_multi_ack_to_tgt_rsp_req 897 r_multi_ack_to_tgt_rsp_req = false; 917 898 918 899 m_cc_receive_to_multi_ack_fifo.init(); … … 922 903 r_cas_lfsr = -1 ; 923 904 r_cas_to_ixr_cmd_req = false; 924 r_cas_to_cc_send_multi_req = false;925 r_cas_to_cc_send_brdcast_req = false;905 r_cas_to_cc_send_multi_req = false; 906 r_cas_to_cc_send_brdcast_req = false; 926 907 927 908 m_cas_to_cc_send_inst_fifo.init(); 928 909 m_cas_to_cc_send_srcid_fifo.init(); 929 #if L1_MULTI_CACHE930 m_cas_to_cc_send_cache_id_fifo.init();931 #endif932 910 933 911 for(size_t i=0; i<m_trt_lines ; i++) … … 945 923 m_xram_rsp_to_cc_send_inst_fifo.init(); 946 924 m_xram_rsp_to_cc_send_srcid_fifo.init(); 947 #if L1_MULTI_CACHE 948 m_xram_rsp_to_cc_send_cache_id_fifo.init(); 949 #endif 950 951 r_ixr_cmd_cpt = 0; 925 952 926 r_alloc_dir_reset_cpt = 0; 953 927 r_alloc_heap_reset_cpt = 0; … … 964 938 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 965 939 r_cleanup_to_ixr_cmd_srcid = 0; 966 r_cleanup_to_ixr_cmd_ trdid= 0;940 r_cleanup_to_ixr_cmd_index = 0; 967 941 r_cleanup_to_ixr_cmd_pktid = 0; 968 942 r_cleanup_to_ixr_cmd_nline = 0; … … 971 945 r_cleanup_to_ixr_cmd_data[word] = 0; 972 946 r_cleanup_data[word] = 0; 973 r_ixr_cmd_ data[word] = 0;947 r_ixr_cmd_wdata[word] = 0; 974 948 } 975 949 … … 1053 1027 size_t write_to_cc_send_fifo_srcid = 0; 1054 1028 1055 #if L1_MULTI_CACHE1056 size_t write_to_cc_send_fifo_cache_id = 0;1057 #endif1058 1059 1029 bool xram_rsp_to_cc_send_fifo_put = false; 1060 1030 bool xram_rsp_to_cc_send_fifo_get = false; … … 1062 1032 
size_t xram_rsp_to_cc_send_fifo_srcid = 0; 1063 1033 1064 #if L1_MULTI_CACHE1065 size_t xram_rsp_to_cc_send_fifo_cache_id = 0;1066 #endif1067 1068 1034 bool config_to_cc_send_fifo_put = false; 1069 1035 bool config_to_cc_send_fifo_get = false; … … 1075 1041 bool cas_to_cc_send_fifo_inst = false; 1076 1042 size_t cas_to_cc_send_fifo_srcid = 0; 1077 1078 #if L1_MULTI_CACHE1079 size_t cas_to_cc_send_fifo_cache_id = 0;1080 #endif1081 1043 1082 1044 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; … … 1129 1091 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 1130 1092 //////////////////////////////////////////////////////////////////////////////////// 1093 1094 //std::cout << std::endl << "tgt_cmd_fsm" << std::endl; 1131 1095 1132 1096 switch(r_tgt_cmd_fsm.read()) … … 1232 1196 case TGT_CMD_ERROR: // response error must be sent 1233 1197 1234 // wait if pending TGT_CMD request to TGT_RSP FSM1198 // wait if pending request 1235 1199 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1236 1200 … … 1266 1230 size_t error; 1267 1231 uint32_t rdata = 0; // default value 1232 uint32_t wdata = p_vci_tgt.wdata.read(); 1268 1233 1269 1234 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock … … 1274 1239 error = 0; 1275 1240 r_config_lock = true; 1241 if ( rdata == 0 ) 1242 { 1243 r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1244 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1245 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1246 } 1276 1247 } 1277 1248 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1278 and (cell == MEMC_LOCK) ) 1249 and (cell == MEMC_LOCK) 1250 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1279 1251 { 1280 1252 need_rsp = true; … … 1283 1255 } 1284 1256 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1285 and (cell == MEMC_ADDR_LO) ) 1286 { 1257 and (cell == MEMC_ADDR_LO) 1258 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1259 { 1260 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1261 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1262 1287 1263 need_rsp = true; 1288 1264 error = 0; … … 1291 1267 } 1292 1268 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1293 and (cell == MEMC_ADDR_HI) ) 1269 and (cell == MEMC_ADDR_HI) 1270 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1271 1294 1272 { 1295 1273 need_rsp = true; … … 1299 1277 } 1300 1278 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1301 and (cell == MEMC_BUF_LENGTH) ) 1279 and (cell == MEMC_BUF_LENGTH) 1280 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1302 1281 { 1303 1282 need_rsp = true; 1304 1283 error = 0; 1305 1284 size_t lines = (size_t)(p_vci_tgt.wdata.read()/(m_words<<2)); 1306 if ( r_config_address.read()/(m_words*vci_param_int::B) ) lines++; 1307 r_config_nlines = lines; 1285 if ( r_config_address.read()%(m_words*4) ) lines++; 1286 r_config_cmd_lines = lines; 1287 r_config_rsp_lines = lines; 1308 1288 } 1309 1289 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1310 and (cell == MEMC_CMD_TYPE) ) 1290 and (cell == MEMC_CMD_TYPE) 1291 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1311 1292 { 1312 1293 need_rsp = false; 1313 1294 error = 0; 1314 1295 r_config_cmd = p_vci_tgt.wdata.read(); 1296 1297 // prepare delayed response from CONFIG FSM 1315 1298 r_config_srcid = p_vci_tgt.srcid.read(); 1316 1299 r_config_trdid = p_vci_tgt.trdid.read(); … … 
1343 1326 << " address = " << std::hex << p_vci_tgt.address.read() 1344 1327 << " / wdata = " << p_vci_tgt.wdata.read() 1328 << " / need_rsp = " << need_rsp 1345 1329 << " / error = " << error << std::endl; 1346 1330 #endif … … 1446 1430 // MULTI_ACK FSM 1447 1431 ///////////////////////////////////////////////////////////////////////// 1448 // This FSM controls the response to the multicast update or multicast 1449 // inval coherence requests sent by the memory cache to the L1 caches and 1450 // update the UPT. 1432 // This FSM controls the response to the multicast update requests sent 1433 // by the memory cache to the L1 caches and update the UPT. 1451 1434 // 1452 1435 // - The FSM decrements the proper entry in UPT, … … 1454 1437 // - If required, it sends a request to the TGT_RSP FSM to complete 1455 1438 // a pending write transaction. 1456 // - If required, it sends an acknowledge to the CONFIG FSM to signal1457 // completion of a line inval.1458 1439 // 1459 1440 // All those multi-ack packets are one flit packet. 1460 // The index in the UPT is defined in the UPDTID field.1441 // The index in the UPT is defined in the TRDID field. 1461 1442 //////////////////////////////////////////////////////////////////////// 1443 1444 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1462 1445 1463 1446 switch(r_multi_ack_fsm.read()) … … 1574 1557 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1575 1558 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1576 bool need_ack = m_upt.need_ack(r_multi_ack_upt_index.read());1577 1559 1578 1560 // clear the UPT entry … … 1580 1562 1581 1563 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1582 else if ( need_ack ) r_multi_ack_fsm = MULTI_ACK_CONFIG_ACK;1583 1564 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1584 1565 … … 1611 1592 break; 1612 1593 } 1613 //////////////////////////1614 case MULTI_ACK_CONFIG_ACK: // Signals multi-inval completion to CONFIG FSM1615 // Wait if pending request1616 {1617 if ( r_multi_ack_to_config_ack.read() ) break;1618 1619 r_multi_ack_to_config_ack = true;1620 r_multi_ack_fsm = MULTI_ACK_IDLE;1621 1622 #if DEBUG_MEMC_MULTI_ACK1623 if(m_debug)1624 std::cout << " <MEMC " << name() << " MULTI_ACK_CONFIG_ACK>"1625 << " Signals inval completion to CONFIG FSM" << std::endl;1626 #endif1627 break;1628 }1629 1594 } // end switch r_multi_ack_fsm 1630 1595 … … 1634 1599 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1635 1600 // The target buffer can have any size, and there is one single command for 1636 // all cache lines covered by the target buffer. 1637 // An INVAL or SYNC configuration request is defined by the followinf registers: 1638 // - bool r_config_cmd : INVAL / SYNC / NOP) 1601 // all cache lines covered by the target buffer. 1602 // 1603 // An INVAL or SYNC configuration operation is defined by the following registers: 1604 // - bool r_config_cmd : INVAL / SYNC / NOP 1639 1605 // - uint64_t r_config_address : buffer base address 1640 // - uint32_t r_config_nlines : number of lines covering buffer 1606 // - uint32_t r_config_cmd_lines : number of lines to be handled 1607 // - uint32_t r_config_rsp_lines : number of lines not completed 1641 1608 // 1642 1609 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1643 // all cache lines covered by the target buffer. 1644 // 1610 // all cache lines covered by the buffer. 
The various lines of a given buffer 1611 // can be pipelined: the CONFIG FSM does not wait the response for line (n) to send 1612 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1613 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1614 // 1645 1615 // - INVAL request: 1646 // For each line, it access to the DIR array.1616 // For each line, it access to the DIR. 1647 1617 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1648 1618 // In case of hit, with no copies in L1 caches, the line is invalidated and 1649 1619 // a response is requested to TGT_RSP FSM. 1650 1620 // If there is copies, a multi-inval, or a broadcast-inval coherence transaction 1651 // is launched and registered in UPT. The multi-inval transaction is signaled 1652 // by the r_multi_ack_to config_ack or r_cleanup_to_config_ack flip-flops. 1653 // The config inval response is sent only when the last line has been invalidated. 1654 // 1621 // is launched and registered in UPT. The multi-inval transaction completion 1622 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1623 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1624 // TODO : The target buffer address must be aligned on a cache line boundary. 1625 // This constraint can be released, but it requires to make 2 PUT transactions 1626 // for the first and the last line... 1627 // 1655 1628 // - SYNC request: 1656 // 1657 // ... Not implemented yet ... 1629 // For each line, it access to the DIR. 1630 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1631 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1632 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1633 // when a PUT response is received. 1634 // The CONFIG SYNC response is sent only when the last PUT response is received. 1658 1635 // 1659 1636 // From the software point of view, a configuration request is a sequence 1660 // of 6 atomic accesses in an uncached segment: 1637 // of 6 atomic accesses in an uncached segment. 
A dedicated lock is used 1638 // to handle only one configuration command at a given time: 1661 1639 // - Read MEMC_LOCK : Get the lock 1662 1640 // - Write MEMC_ADDR_LO : Set the buffer address LSB … … 1667 1645 //////////////////////////////////////////////////////////////////////////////////// 1668 1646 1647 //std::cout << std::endl << "config_fsm" << std::endl; 1648 1669 1649 switch( r_config_fsm.read() ) 1670 1650 { … … 1679 1659 if(m_debug) 1680 1660 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1681 << " address = " << std::hex << r_config_address.read()1682 << " / nlines = " << std::dec << r_config_nlines.read()1661 << " / address = " << std::hex << r_config_address.read() 1662 << " / lines = " << std::dec << r_config_cmd_lines.read() 1683 1663 << " / type = " << r_config_cmd.read() << std::endl; 1684 1664 #endif … … 1687 1667 } 1688 1668 ///////////////// 1689 case CONFIG_LOOP: // test last line1690 { 1691 if ( r_config_ nlines.read() == 0 )1669 case CONFIG_LOOP: // test if last line to be handled 1670 { 1671 if ( r_config_cmd_lines.read() == 0 ) 1692 1672 { 1693 1673 r_config_cmd = MEMC_CMD_NOP; 1694 r_config_fsm = CONFIG_ RSP;1674 r_config_fsm = CONFIG_WAIT; 1695 1675 } 1696 1676 else … … 1702 1682 if(m_debug) 1703 1683 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1704 << " address = " << std::hex << r_config_address.read()1705 << " / nlines = " << std::dec << r_config_nlines.read()1684 << " / address = " << std::hex << r_config_address.read() 1685 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 1706 1686 << " / command = " << r_config_cmd.read() << std::endl; 1707 1687 #endif 1708 1688 break; 1689 } 1690 ///////////////// 1691 case CONFIG_WAIT: // wait completion (last response) 1692 { 1693 if ( r_config_rsp_lines.read() == 0 ) // last response received 1694 { 1695 r_config_fsm = CONFIG_RSP; 1696 } 1697 1698 #if DEBUG_MEMC_CONFIG 1699 if(m_debug) 1700 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1701 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1702 #endif 1703 break; 1704 } 1705 //////////////// 1706 case CONFIG_RSP: // request TGT_RSP FSM to return response 1707 { 1708 if ( not r_config_to_tgt_rsp_req.read() ) 1709 { 1710 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1711 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1712 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1713 r_config_to_tgt_rsp_error = false; 1714 r_config_to_tgt_rsp_req = true; 1715 r_config_fsm = CONFIG_IDLE; 1716 1717 #if DEBUG_MEMC_CONFIG 1718 if(m_debug) 1719 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1720 << " error = " << r_config_to_tgt_rsp_error.read() 1721 << " / rsrcid = " << std::hex << r_config_srcid.read() 1722 << " / rtrdid = " << std::hex << r_config_trdid.read() 1723 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1724 #endif 1725 } 1726 break; 1727 1709 1728 } 1710 1729 //////////////////// … … 1726 1745 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1727 1746 { 1747 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1748 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1749 1728 1750 size_t way = 0; 1729 1751 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); … … 1736 1758 r_config_dir_copy_srcid = entry.owner.srcid; 1737 1759 r_config_dir_is_cnt = entry.is_cnt; 1760 r_config_dir_lock = entry.lock; 1738 1761 r_config_dir_count = entry.count; 1739 
r_config_dir_ next_ptr= entry.ptr;1740 1741 r_config_fsm = CONFIG_ DIR_IVT_LOCK;1762 r_config_dir_ptr = entry.ptr; 1763 1764 r_config_fsm = CONFIG_IVT_LOCK; 1742 1765 } 1743 1766 else if ( entry.valid and // hit & sync command … … 1745 1768 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1746 1769 { 1747 std::cout << "VCI_MEM_CACHE ERROR: " 1748 << "SYNC config request not implemented yet" << std::endl; 1749 exit(0); 1770 r_config_fsm = CONFIG_TRT_LOCK; 1750 1771 } 1751 else // return to LOOP1772 else // miss : return to LOOP 1752 1773 { 1753 r_config_nlines = r_config_nlines.read() - 1; 1754 r_config_address = r_config_address.read() + (m_words<<2); 1755 r_config_fsm = CONFIG_LOOP; 1774 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1775 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1776 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1777 r_config_address = r_config_address.read() + (m_words<<2); 1778 r_config_fsm = CONFIG_LOOP; 1756 1779 } 1757 1780 … … 1767 1790 break; 1768 1791 } 1769 ///////////////////////// 1770 case CONFIG_DIR_IVT_LOCK: // enter this state in case of INVAL command 1771 // Try to get both DIR & IVT locks, and return 1772 // to LOOP state if IVT full. 1773 // Register inval in IVT, and invalidate the 1774 // directory if IVT not full. 1775 { 1792 ///////////////////// 1793 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1794 // to a dirty cache line 1795 // keep DIR lock, and try to get TRT lock 1796 // return to LOOP state if TRT full 1797 // reset dirty bit in DIR and register a PUT 1798 // trabsaction in TRT if not full. 1799 { 1800 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1801 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1802 1803 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1804 { 1805 size_t index = 0; 1806 bool wok = not m_trt.full(index); 1807 1808 if ( not wok ) 1809 { 1810 r_config_fsm = CONFIG_LOOP; 1811 } 1812 else 1813 { 1814 size_t way = r_config_dir_way.read(); 1815 size_t set = m_y[r_config_address.read()]; 1816 1817 // reset dirty bit in DIR 1818 DirectoryEntry entry; 1819 entry.valid = true; 1820 entry.dirty = false; 1821 entry.tag = m_z[r_config_address.read()]; 1822 entry.is_cnt = r_config_dir_is_cnt.read(); 1823 entry.lock = r_config_dir_lock.read(); 1824 entry.ptr = r_config_dir_ptr.read(); 1825 entry.count = r_config_dir_count.read(); 1826 entry.owner.inst = r_config_dir_copy_inst.read(); 1827 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1828 m_cache_directory.write( set, 1829 way, 1830 entry ); 1831 1832 r_config_trt_index = index; 1833 r_config_fsm = CONFIG_TRT_SET; 1834 } 1835 1836 #if DEBUG_MEMC_CONFIG 1837 if(m_debug) 1838 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1839 << " wok = " << std::dec << wok 1840 << " index = " << index << std::endl; 1841 #endif 1842 } 1843 break; 1844 } 1845 //////////////////// 1846 case CONFIG_TRT_SET: // read data in cache 1847 // and post a PUT request in TRT 1848 { 1849 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1850 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1851 1852 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1853 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1854 1855 // read data into cache 1856 size_t way = r_config_dir_way.read(); 1857 size_t set = m_y[r_config_address.read()]; 1858 1859 sc_signal<data_t> config_data[16]; 1860 m_cache_data.read_line( way, 1861 set, 1862 config_data ); 1863 1864 // post a PUT request in TRT 1865 std::vector<data_t> 
data_vector; 1866 data_vector.clear(); 1867 for(size_t i=0; i<m_words; i++) data_vector.push_back(config_data[i].read()); 1868 m_trt.set( r_config_trt_index.read(), 1869 false, // PUT 1870 m_nline[r_config_address.read()], // nline 1871 0, // srcid: unused 1872 0, // trdid: unused 1873 0, // pktid: unused 1874 false, // not proc_read 1875 0, // read_length: unused 1876 0, // word_index: unused 1877 std::vector<be_t>(m_words,0xF), 1878 data_vector); 1879 1880 #if DEBUG_MEMC_CONFIG 1881 if(m_debug) 1882 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1883 << " address = " << std::hex << r_config_address.read() 1884 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1885 #endif 1886 break; 1887 } 1888 //////////////////// 1889 case CONFIG_PUT_REQ: // PUT request to IXR_CMD_FSM 1890 { 1891 if ( not r_config_to_ixr_cmd_req.read() ) 1892 { 1893 r_config_to_ixr_cmd_req = true; 1894 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1895 1896 // prepare next iteration 1897 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1898 r_config_address = r_config_address.read() + (m_words<<2); 1899 r_config_fsm = CONFIG_LOOP; 1900 1901 #if DEBUG_MEMC_CONFIG 1902 if(m_debug) 1903 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> PUT request to IXR_CMD_FSM" 1904 << " / address = " << std::hex << r_config_address.read() << std::endl; 1905 #endif 1906 } 1907 break; 1908 } 1909 ///////////////////// 1910 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1911 // Keep DIR lock and Try to get IVT lock. 1912 // Return to LOOP state if IVT full. 1913 // Register inval in IVT, and invalidate the 1914 // directory if IVT not full. 1915 { 1916 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1917 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1918 1776 1919 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1777 1920 { … … 1782 1925 { 1783 1926 m_cache_directory.inval( way, set ); 1784 r_config_nlines = r_config_nlines.read() - 1; 1785 r_config_address = r_config_address.read() + (m_words<<2); 1786 r_config_fsm = CONFIG_LOOP; 1927 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1928 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1929 r_config_address = r_config_address.read() + (m_words<<2); 1930 r_config_fsm = CONFIG_LOOP; 1787 1931 1788 1932 #if DEBUG_MEMC_CONFIG 1789 1933 if(m_debug) 1790 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1934 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1791 1935 << " No copies in L1 : inval DIR entry" << std::endl; 1792 1936 #endif … … 1819 1963 r_config_ivt_index = index; 1820 1964 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1821 else r_config_fsm = CONFIG_INV _SEND;1965 else r_config_fsm = CONFIG_INVAL_SEND; 1822 1966 1823 1967 #if DEBUG_MEMC_CONFIG 1824 1968 if(m_debug) 1825 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1969 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1826 1970 << " Inval DIR entry and register inval in IVT" 1827 << " :index = " << std::dec << index1971 << " / index = " << std::dec << index 1828 1972 << " / broadcast = " << broadcast << std::endl; 1829 1973 #endif … … 1835 1979 #if DEBUG_MEMC_CONFIG 1836 1980 if(m_debug) 1837 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1981 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1838 1982 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1839 1983 #endif … … 1849 1993 not r_config_to_cc_send_brdcast_req.read() ) 1850 1994 { 1995 // 
post bc inval request 1851 1996 r_config_to_cc_send_multi_req = false; 1852 1997 r_config_to_cc_send_brdcast_req = true; 1853 1998 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1854 1999 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1855 r_cleanup_to_config_ack = false; 1856 r_config_fsm = CONFIG_BC_WAIT; 2000 2001 // prepare next iteration 2002 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2003 r_config_address = r_config_address.read() + (m_words<<2); 2004 r_config_fsm = CONFIG_LOOP; 1857 2005 1858 2006 #if DEBUG_MEMC_CONFIG … … 1865 2013 break; 1866 2014 } 1867 //////////////////// 1868 case CONFIG_BC_WAIT: // wait broadcast completion to return to LOOP 1869 { 1870 if ( r_cleanup_to_config_ack.read() ) 1871 { 1872 r_config_fsm = CONFIG_LOOP; 1873 r_config_nlines = r_config_nlines.read() - 1; 1874 r_config_address = r_config_address.read() + (m_words<<2); 1875 } 1876 1877 #if DEBUG_MEMC_CONFIG 1878 if(m_debug) 1879 std::cout << " <MEMC " << name() << " CONFIG_BC_WAIT> Waiting BC completion " 1880 << " done = " << r_cleanup_to_config_ack.read() 1881 << std::endl; 1882 #endif 1883 break; 1884 } 1885 ///////////////////// 1886 case CONFIG_INV_SEND: // Post a multi inval request to CC_SEND FSM 2015 /////////////////////// 2016 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 1887 2017 { 1888 2018 if( not r_config_to_cc_send_multi_req.read() and 1889 2019 not r_config_to_cc_send_brdcast_req.read() ) 1890 2020 { 2021 // post multi inval request 1891 2022 r_config_to_cc_send_multi_req = true; 1892 2023 r_config_to_cc_send_brdcast_req = false; 1893 2024 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1894 2025 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1895 r_multi_ack_to_config_ack = false; 1896 2026 2027 // post data into FIFO 1897 2028 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 1898 2029 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 1899 2030 config_to_cc_send_fifo_put = true; 1900 2031 1901 if ( r_config_dir_count.read() == 1 ) r_config_fsm = CONFIG_INV_WAIT; 1902 else r_config_fsm = CONFIG_HEAP_REQ; 2032 if ( r_config_dir_count.read() == 1 ) // one copy 2033 { 2034 // prepare next iteration 2035 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2036 r_config_address = r_config_address.read() + (m_words<<2); 2037 r_config_fsm = CONFIG_LOOP; 2038 } 2039 else // several copies 2040 { 2041 r_config_fsm = CONFIG_HEAP_REQ; 2042 } 1903 2043 1904 2044 #if DEBUG_MEMC_CONFIG 1905 2045 if(m_debug) 1906 std::cout << " <MEMC " << name() << " CONFIG_INV _SEND>"2046 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 1907 2047 << " Post multi inval request to CC_SEND FSM" 1908 2048 << " / address = " << std::hex << r_config_address.read() … … 1919 2059 { 1920 2060 r_config_fsm = CONFIG_HEAP_SCAN; 1921 r_config_heap_next = r_config_dir_ next_ptr.read();2061 r_config_heap_next = r_config_dir_ptr.read(); 1922 2062 } 1923 2063 … … 1966 2106 if ( m_heap.is_full() ) 1967 2107 { 1968 last_entry.next = r_config_dir_ next_ptr.read();2108 last_entry.next = r_config_dir_ptr.read(); 1969 2109 m_heap.unset_full(); 1970 2110 } … … 1974 2114 } 1975 2115 1976 m_heap.write_free_ptr( r_config_dir_ next_ptr.read() );2116 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 1977 2117 m_heap.write( r_config_heap_next.read(), last_entry ); 1978 r_config_fsm = CONFIG_INV_WAIT; 2118 2119 // prepare next iteration 2120 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2121 r_config_address = 
r_config_address.read() + (m_words<<2); 2122 r_config_fsm = CONFIG_LOOP; 1979 2123 1980 2124 #if DEBUG_MEMC_CONFIG … … 1984 2128 #endif 1985 2129 break; 1986 }1987 /////////////////////1988 case CONFIG_INV_WAIT: // wait inval completion to return to LOOP1989 {1990 if ( r_multi_ack_to_config_ack.read() )1991 {1992 r_config_fsm = CONFIG_LOOP;1993 r_config_nlines = r_config_nlines.read() - 1;1994 r_config_address = r_config_address.read() + (m_words<<2);1995 }1996 1997 #if DEBUG_MEMC_CONFIG1998 if(m_debug)1999 std::cout << " <MEMC " << name() << " CONFIG_INV_WAIT> Waiting inval completion "2000 << " done = " << r_multi_ack_to_config_ack.read()2001 << std::endl;2002 #endif2003 break;2004 }2005 2006 ////////////////2007 case CONFIG_RSP: // request TGT_RSP FSM to return response2008 {2009 if ( not r_config_to_tgt_rsp_req.read() )2010 {2011 r_config_to_tgt_rsp_srcid = r_config_srcid.read();2012 r_config_to_tgt_rsp_trdid = r_config_trdid.read();2013 r_config_to_tgt_rsp_pktid = r_config_pktid.read();2014 r_config_to_tgt_rsp_error = false;2015 r_config_to_tgt_rsp_req = true;2016 r_config_fsm = CONFIG_IDLE;2017 2018 #if DEBUG_MEMC_CONFIG2019 if(m_debug)2020 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:"2021 << " error = " << r_config_to_tgt_rsp_error.read()2022 << " / rsrcid = " << std::hex << r_config_srcid.read() << std::endl;2023 #endif2024 }2025 break;2026 2027 2130 } 2028 2131 } // end switch r_config_fsm … … 2051 2154 //////////////////////////////////////////////////////////////////////////////////// 2052 2155 2156 //std::cout << std::endl << "read_fsm" << std::endl; 2157 2053 2158 switch(r_read_fsm.read()) 2054 2159 { … … 2056 2161 case READ_IDLE: // waiting a read request 2057 2162 { 2058 if(m_cmd_read_addr_fifo.rok())2059 {2163 if(m_cmd_read_addr_fifo.rok()) 2164 { 2060 2165 2061 2166 #if DEBUG_MEMC_READ 2062 if(m_debug) 2063 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2064 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2065 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2066 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2067 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2068 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2069 #endif 2070 r_read_fsm = READ_DIR_REQ; 2071 } 2072 break; 2073 } 2074 2167 if(m_debug) 2168 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2169 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2170 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2171 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2172 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2173 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2174 #endif 2175 r_read_fsm = READ_DIR_REQ; 2176 } 2177 break; 2178 } 2075 2179 ////////////////// 2076 2180 case READ_DIR_REQ: // Get the lock to the directory … … 2095 2199 case READ_DIR_LOCK: // check directory for hit / miss 2096 2200 { 2097 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2098 { 2201 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2202 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2203 2099 2204 size_t way = 0; 2100 DirectoryEntry entry = 2101 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2205 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2206 2102 2207 // access the global table ONLY when we have an LL cmd 2103 2208 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 2104 2209 { 2105 r_read_ll_key = 
m_llsc_table.ll(m_cmd_read_addr_fifo.read());2210 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2106 2211 } 2107 2212 r_read_is_cnt = entry.is_cnt; … … 2112 2217 r_read_count = entry.count; 2113 2218 r_read_copy = entry.owner.srcid; 2114 2115 #if L1_MULTI_CACHE2116 r_read_copy_cache = entry.owner.cache_id;2117 #endif2118 2219 r_read_copy_inst = entry.owner.inst; 2119 2220 r_read_ptr = entry.ptr; // pointer to the heap … … 2125 2226 if(entry.valid) // hit 2126 2227 { 2127 // test if we need to register a new copy in the heap2128 if(entry.is_cnt or (entry.count == 0) or !cached_read)2129 {2130 r_read_fsm = READ_DIR_HIT;2131 }2132 else2133 {2134 r_read_fsm = READ_HEAP_REQ;2135 }2228 // test if we need to register a new copy in the heap 2229 if(entry.is_cnt or (entry.count == 0) or !cached_read) 2230 { 2231 r_read_fsm = READ_DIR_HIT; 2232 } 2233 else 2234 { 2235 r_read_fsm = READ_HEAP_REQ; 2236 } 2136 2237 } 2137 2238 else // miss 2138 2239 { 2139 r_read_fsm = READ_TRT_LOCK;2240 r_read_fsm = READ_TRT_LOCK; 2140 2241 } 2141 2242 … … 2152 2253 } 2153 2254 #endif 2154 } 2155 else 2156 { 2157 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_DIR_LOCK state" 2158 << "Bad DIR allocation" << std::endl; 2159 exit(0); 2160 } 2161 break; 2162 } 2163 2255 break; 2256 } 2164 2257 ////////////////// 2165 2258 case READ_DIR_HIT: // read data in cache & update the directory … … 2170 2263 2171 2264 { 2172 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2173 { 2265 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2266 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2267 2174 2268 // check if this is an instruction read, this means pktid is either 2175 2269 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding … … 2188 2282 m_cache_data.read_line(way, set, r_read_data); 2189 2283 2190 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true);2191 2192 2284 // update the cache directory 2193 2285 DirectoryEntry entry; … … 2209 2301 if(cached_read) // Cached read => we must update the copies 2210 2302 { 2211 if(!is_cnt) // Not counter mode 2212 { 2213 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2214 #if L1_MULTI_CACHE 2215 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 2216 #endif 2217 entry.owner.inst = inst_read; 2218 entry.count = r_read_count.read() + 1; 2219 } 2220 else // Counter mode 2221 { 2222 entry.owner.srcid = 0; 2223 #if L1_MULTI_CACHE 2224 entry.owner.cache_id = 0; 2225 #endif 2226 entry.owner.inst = false; 2227 entry.count = r_read_count.read() + 1; 2228 } 2303 if(!is_cnt) // Not counter mode 2304 { 2305 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2306 entry.owner.inst = inst_read; 2307 entry.count = r_read_count.read() + 1; 2308 } 2309 else // Counter mode 2310 { 2311 entry.owner.srcid = 0; 2312 entry.owner.inst = false; 2313 entry.count = r_read_count.read() + 1; 2314 } 2229 2315 } 2230 2316 else // Uncached read 2231 2317 { 2232 entry.owner.srcid = r_read_copy.read(); 2233 #if L1_MULTI_CACHE 2234 entry.owner.cache_id = r_read_copy_cache.read(); 2235 #endif 2236 entry.owner.inst = r_read_copy_inst.read(); 2237 entry.count = r_read_count.read(); 2318 entry.owner.srcid = r_read_copy.read(); 2319 entry.owner.inst = r_read_copy_inst.read(); 2320 entry.count = r_read_count.read(); 2238 2321 } 2239 2322 2240 2323 #if DEBUG_MEMC_READ 2241 if(m_debug) 2242 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2243 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2244 << " / set = " << std::dec << set 2245 
<< " / way = " << way 2246 << " / owner_id = " << std::hex << entry.owner.srcid 2247 << " / owner_ins = " << std::dec << entry.owner.inst 2248 << " / count = " << entry.count 2249 << " / is_cnt = " << entry.is_cnt << std::endl; 2250 #endif 2251 2252 if(m_monitor_ok) 2324 if(m_debug) 2325 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2326 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2327 << " / set = " << std::dec << set 2328 << " / way = " << way 2329 << " / owner_id = " << std::hex << entry.owner.srcid 2330 << " / owner_ins = " << std::dec << entry.owner.inst 2331 << " / count = " << entry.count 2332 << " / is_cnt = " << entry.is_cnt << std::endl; 2333 #endif 2334 /*if(m_monitor_ok) 2253 2335 { 2254 2336 char buf[80]; … … 2257 2339 (int)((m_cmd_read_pktid_fifo.read()&0x2)!=0)); 2258 2340 check_monitor(m_cmd_read_addr_fifo.read(), r_read_data[0], true); 2259 } 2341 }*/ 2260 2342 m_cache_directory.write(set, way, entry); 2261 2343 r_read_fsm = READ_RSP; 2262 } 2263 break; 2344 break; 2264 2345 } 2265 2346 /////////////////// … … 2297 2378 2298 2379 m_cache_data.read_line(way, set, r_read_data); 2299 2300 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true);2301 2380 2302 2381 // update the cache directory … … 2312 2391 { 2313 2392 entry.owner.srcid = r_read_copy.read(); 2314 #if L1_MULTI_CACHE2315 entry.owner.cache_id = r_read_copy_cache.read();2316 #endif2317 2393 entry.owner.inst = r_read_copy_inst.read(); 2318 2394 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap … … 2321 2397 { 2322 2398 entry.owner.srcid = 0; 2323 #if L1_MULTI_CACHE2324 entry.owner.cache_id = 0;2325 #endif2326 2399 entry.owner.inst = false; 2327 2400 entry.ptr = 0; … … 2389 2462 HeapEntry heap_entry; 2390 2463 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2391 #if L1_MULTI_CACHE2392 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read();2393 #endif2394 2464 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2395 2465 … … 2455 2525 HeapEntry last_entry; 2456 2526 last_entry.owner.srcid = 0; 2457 #if L1_MULTI_CACHE2458 last_entry.owner.cache_id = 0;2459 #endif2460 2527 last_entry.owner.inst = false; 2461 2528 … … 2483 2550 case READ_RSP: // request the TGT_RSP FSM to return data 2484 2551 { 2485 if(!r_read_to_tgt_rsp_req)2486 {2487 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i];2488 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()];2489 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read();2490 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read();2491 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read();2492 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read();2493 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read();2494 cmd_read_fifo_get = true;2495 r_read_to_tgt_rsp_req = true;2496 r_read_fsm = READ_IDLE;2552 if(!r_read_to_tgt_rsp_req) 2553 { 2554 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2555 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2556 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2557 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2558 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2559 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 2560 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2561 cmd_read_fifo_get = true; 2562 r_read_to_tgt_rsp_req = true; 2563 r_read_fsm = READ_IDLE; 2497 2564 2498 2565 #if DEBUG_MEMC_READ … … 2503 2570 << " / nwords = " 
<< std::dec << m_cmd_read_length_fifo.read() << std::endl; 2504 2571 #endif 2505 }2506 break;2572 } 2573 break; 2507 2574 } 2508 2575 /////////////////// … … 2525 2592 if(hit_read or !wok or hit_write) // missing line already requested or no space 2526 2593 { 2527 if(!wok) m_cpt_trt_full++;2594 if(!wok) m_cpt_trt_full++; 2528 2595 if(hit_read or hit_write) m_cpt_trt_rb++; 2529 2596 r_read_fsm = READ_IDLE; … … 2551 2618 break; 2552 2619 } 2553 2554 2620 ////////////////// 2555 2621 case READ_TRT_SET: // register get transaction in TRT 2556 2622 { 2557 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ)2558 {2559 m_trt.set(r_read_trt_index.read(),2560 true,2561 2562 2563 2564 2565 true,2566 2567 2568 2569 2570 r_read_ll_key.read());2623 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2624 { 2625 m_trt.set( r_read_trt_index.read(), 2626 true, // GET 2627 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2628 m_cmd_read_srcid_fifo.read(), 2629 m_cmd_read_trdid_fifo.read(), 2630 m_cmd_read_pktid_fifo.read(), 2631 true, // proc read 2632 m_cmd_read_length_fifo.read(), 2633 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2634 std::vector<be_t> (m_words,0), 2635 std::vector<data_t> (m_words,0), 2636 r_read_ll_key.read() ); 2571 2637 #if DEBUG_MEMC_READ 2572 2638 if(m_debug) 2573 std::cout << " <MEMC " << name() << " READ_TRT_SET> Write in Transaction Table:"2639 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2574 2640 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2575 2641 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2576 2642 #endif 2577 r_read_fsm = READ_TRT_REQ;2578 }2579 break;2643 r_read_fsm = READ_TRT_REQ; 2644 } 2645 break; 2580 2646 } 2581 2647 … … 2583 2649 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2584 2650 { 2585 if(not r_read_to_ixr_cmd_req) 2586 { 2587 cmd_read_fifo_get = true; 2588 r_read_to_ixr_cmd_req = true; 2589 r_read_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2590 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 2591 r_read_fsm = READ_IDLE; 2651 if(not r_read_to_ixr_cmd_req) 2652 { 2653 cmd_read_fifo_get = true; 2654 r_read_to_ixr_cmd_req = true; 2655 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2656 r_read_fsm = READ_IDLE; 2592 2657 2593 2658 #if DEBUG_MEMC_READ … … 2596 2661 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2597 2662 #endif 2598 }2599 break;2663 } 2664 break; 2600 2665 } 2601 2666 } // end switch read_fsm … … 2615 2680 // If the data is cached by other processors, a coherence transaction must 2616 2681 // be launched (sc requests always require a coherence transaction): 2617 // It is a multicast update if the line is not in counter mode , andthe processor2682 // It is a multicast update if the line is not in counter mode: the processor 2618 2683 // takes the lock protecting the Update Table (UPT) to register this transaction. 2619 // It is a broadcast invalidate if the line is in counter mode.2620 2684 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2621 2685 // a multi-update request to all owners of the line (but the writer), … … 2623 2687 // does not respond to the writing processor, as this response will be sent by 2624 2688 // the MULTI_ACK FSM when all update responses have been received. 2689 // It is a broadcast invalidate if the line is in counter mode: The line 2690 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2691 // after registration in TRT. 
2625 2692 // 2626 2693 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2627 2694 // table (TRT). If a read transaction to the XRAM for this line already exists, 2628 2695 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2629 // the WRITE FSM register a new transaction in TRT, and sends a read linerequest2696 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2630 2697 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2631 2698 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2632 2699 ///////////////////////////////////////////////////////////////////////////////////// 2700 2701 //std::cout << std::endl << "write_fsm" << std::endl; 2633 2702 2634 2703 switch(r_write_fsm.read()) … … 2637 2706 case WRITE_IDLE: // copy first word of a write burst in local buffer 2638 2707 { 2639 if(m_cmd_write_addr_fifo.rok()) 2640 { 2641 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2642 m_cpt_sc++; 2643 else 2644 { 2645 m_cpt_write++; 2646 m_cpt_write_cells++; 2647 } 2648 2649 // consume a word in the FIFO & write it in the local buffer 2650 cmd_write_fifo_get = true; 2651 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2652 2653 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2654 r_write_word_index = index; 2655 r_write_word_count = 1; 2656 r_write_data[index] = m_cmd_write_data_fifo.read(); 2657 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2658 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2659 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2660 r_write_pending_sc = false; 2661 2662 // initialize the be field for all words 2663 for(size_t word=0 ; word<m_words ; word++) 2664 { 2665 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2666 else r_write_be[word] = 0x0; 2667 } 2668 2669 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2670 { 2671 r_write_fsm = WRITE_DIR_REQ; 2672 } 2673 else 2674 { 2675 r_write_fsm = WRITE_NEXT; 2676 } 2708 if(m_cmd_write_addr_fifo.rok()) 2709 { 2710 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2711 { 2712 m_cpt_sc++; 2713 } 2714 else 2715 { 2716 m_cpt_write++; 2717 m_cpt_write_cells++; 2718 } 2719 2720 // consume a word in the FIFO & write it in the local buffer 2721 cmd_write_fifo_get = true; 2722 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2723 2724 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2725 r_write_word_index = index; 2726 r_write_word_count = 1; 2727 r_write_data[index] = m_cmd_write_data_fifo.read(); 2728 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2729 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2730 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2731 r_write_pending_sc = false; 2732 2733 // initialize the be field for all words 2734 for(size_t word=0 ; word<m_words ; word++) 2735 { 2736 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2737 else r_write_be[word] = 0x0; 2738 } 2739 2740 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2741 { 2742 r_write_fsm = WRITE_DIR_REQ; 2743 } 2744 else 2745 { 2746 r_write_fsm = WRITE_NEXT; 2747 } 2677 2748 2678 2749 #if DEBUG_MEMC_WRITE … … 2683 2754 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 2684 2755 #endif 2685 } 2686 break; 2687 } 2688 2756 } 2757 break; 2758 } 2689 2759 //////////////// 2690 2760 case WRITE_NEXT: // copy next word of a write burst in local buffer 2691 2761 { 2692 
if(m_cmd_write_addr_fifo.rok())2693 {2762 if(m_cmd_write_addr_fifo.rok()) 2763 { 2694 2764 2695 2765 #if DEBUG_MEMC_WRITE … … 2699 2769 << std::endl; 2700 2770 #endif 2701 m_cpt_write_cells++; 2702 2703 // check that the next word is in the same cache line 2704 if((m_nline[(addr_t)(r_write_address.read())] != 2705 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())])) 2706 { 2707 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl 2708 << "all words in a write burst must be in same cache line" << std::endl; 2709 2710 exit(0); 2711 } 2712 2713 // consume a word in the FIFO & write it in the local buffer 2714 cmd_write_fifo_get = true; 2715 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2716 2717 r_write_be[index] = m_cmd_write_be_fifo.read(); 2718 r_write_data[index] = m_cmd_write_data_fifo.read(); 2719 r_write_word_count = r_write_word_count.read() + 1; 2720 2721 if(m_cmd_write_eop_fifo.read()) 2722 { 2723 r_write_fsm = WRITE_DIR_REQ; 2724 } 2725 } 2726 break; 2727 } 2728 2729 //////////////////// 2730 case WRITE_DIR_REQ: 2731 { 2732 // Get the lock to the directory 2733 // and access the llsc_global_table 2734 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2735 { 2736 /////////////////////////////////////////////////////////////////////// 2737 // SC command treatment 2738 // We test the r_write_pending_sc register to know if we are returning 2739 // from the WAIT state. 2740 // In this case, the SC has already succeed and we cannot consume 2741 // another time from the FIFO. Also, we don't have to test another 2742 // time if the SC has succeed 2743 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2744 { 2745 if(not m_cmd_write_addr_fifo.rok()) break; 2746 2747 assert(m_cmd_write_eop_fifo.read() and 2748 "Error in VCI_MEM_CACHE : " 2749 "invalid packet format for SC command"); 2750 2751 size_t index = r_write_word_index.read(); 2752 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2771 m_cpt_write_cells++; 2772 2773 // check that the next word is in the same cache line 2774 assert( (m_nline[(addr_t)(r_write_address.read())] == 2775 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2776 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2777 2778 // consume a word in the FIFO & write it in the local buffer 2779 cmd_write_fifo_get = true; 2780 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2781 2782 r_write_be[index] = m_cmd_write_be_fifo.read(); 2783 r_write_data[index] = m_cmd_write_data_fifo.read(); 2784 r_write_word_count = r_write_word_count.read() + 1; 2785 2786 if(m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2787 } 2788 break; 2789 } 2790 /////////////////// 2791 case WRITE_DIR_REQ: // Get the lock to the directory 2792 // and access the llsc_global_table 2793 { 2794 if( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2795 { 2796 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2797 { 2798 // We enter here if it is a new SC command 2799 // If r_write_pending_sc is set the SC is not new and has already been tested 2800 2801 if(not m_cmd_write_addr_fifo.rok()) break; 2802 2803 assert( m_cmd_write_eop_fifo.read() and 2804 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2805 2806 size_t index = r_write_word_index.read(); 2807 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2753 2808 r_write_data[index].read()); 2754 2809 2755 // consume a word in the FIFO & write it in the local buffer 2756 
cmd_write_fifo_get = true; 2757 r_write_data[index] = m_cmd_write_data_fifo.read(); 2758 r_write_sc_fail = not sc_success; 2759 r_write_pending_sc = true; 2760 2761 if(not sc_success) r_write_fsm = WRITE_RSP; 2762 else r_write_fsm = WRITE_DIR_LOCK; 2763 2764 break; 2765 } 2766 2767 /////////////////////////////////////////////////////////////////////// 2768 // WRITE command treatment or SC command returning from the WAIT state 2769 // In the second case, we must access the LL/SC global table to 2770 // erase any possible new reservation when we release the lock on the 2771 // directory 2772 m_llsc_table.sw(m_nline[(addr_t)r_write_address.read()],r_write_word_index.read(),r_write_word_index.read()+r_write_word_count.read()); 2773 2774 r_write_fsm = WRITE_DIR_LOCK; 2775 m_cpt_write_fsm_n_dir_lock++; 2810 // consume a word in the FIFO & write it in the local buffer 2811 cmd_write_fifo_get = true; 2812 r_write_data[index] = m_cmd_write_data_fifo.read(); 2813 r_write_sc_fail = not sc_success; 2814 r_write_pending_sc = true; 2815 2816 if(not sc_success) r_write_fsm = WRITE_RSP; 2817 else r_write_fsm = WRITE_DIR_LOCK; 2818 } 2819 else 2820 { 2821 // We enter here if it is a SW command or an already tested SC command 2822 2823 m_llsc_table.sw( m_nline[(addr_t)r_write_address.read()], 2824 r_write_word_index.read(), 2825 r_write_word_index.read() + r_write_word_count.read() ); 2826 2827 r_write_fsm = WRITE_DIR_LOCK; 2828 } 2776 2829 } 2777 2830 … … 2781 2834 << std::endl; 2782 2835 #endif 2783 2784 m_cpt_write_fsm_dir_lock++;2785 2786 2836 break; 2787 2837 } 2788 2789 2838 //////////////////// 2790 2839 case WRITE_DIR_LOCK: // access directory to check hit/miss 2791 2840 { 2792 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2793 { 2841 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2842 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 2843 2794 2844 size_t way = 0; 2795 2845 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); … … 2797 2847 if(entry.valid) // hit 2798 2848 { 2799 // copy directory entry in local buffer in case of hit 2800 r_write_is_cnt = entry.is_cnt; 2801 r_write_lock = entry.lock; 2802 r_write_tag = entry.tag; 2803 r_write_copy = entry.owner.srcid; 2804 #if L1_MULTI_CACHE 2805 r_write_copy_cache = entry.owner.cache_id; 2806 #endif 2807 r_write_copy_inst = entry.owner.inst; 2808 r_write_count = entry.count; 2809 r_write_ptr = entry.ptr; 2810 r_write_way = way; 2811 2812 if(entry.is_cnt and entry.count) 2813 { 2814 r_write_fsm = WRITE_DIR_READ; 2815 } 2816 else 2817 { 2818 r_write_fsm = WRITE_DIR_HIT; 2819 } 2849 // copy directory entry in local buffer in case of hit 2850 r_write_is_cnt = entry.is_cnt; 2851 r_write_lock = entry.lock; 2852 r_write_tag = entry.tag; 2853 r_write_copy = entry.owner.srcid; 2854 r_write_copy_inst = entry.owner.inst; 2855 r_write_count = entry.count; 2856 r_write_ptr = entry.ptr; 2857 r_write_way = way; 2858 2859 if(entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ; 2860 else r_write_fsm = WRITE_DIR_HIT; 2820 2861 } 2821 2862 else // miss 2822 2863 { 2823 r_write_fsm = WRITE_MISS_TRT_LOCK;2864 r_write_fsm = WRITE_MISS_TRT_LOCK; 2824 2865 } 2825 2866 … … 2838 2879 } 2839 2880 #endif 2840 } 2841 else 2842 { 2843 std::cout << "VCI_MEM_CACHE ERROR " << name() 2844 << " WRITE_DIR_LOCK state" << std::endl 2845 << "bad DIR allocation" << std::endl; 2846 2847 exit(0); 2848 } 2849 break; 2850 } 2851 //////////////////// 2852 case WRITE_DIR_READ: // read the cache and complete the buffer when be!=0xF 2853 { 2854 
// update local buffer 2855 size_t set = m_y[(addr_t)(r_write_address.read())]; 2856 size_t way = r_write_way.read(); 2857 for(size_t word=0 ; word<m_words ; word++) 2858 { 2859 data_t mask = 0; 2860 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 2861 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 2862 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 2863 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 2864 2865 // complete only if mask is not null (for energy consumption) 2866 r_write_data[word] = (r_write_data[word].read() & mask) | 2867 (m_cache_data.read(way, set, word) & ~mask); 2868 2869 } // end for 2870 2871 // test if a coherence broadcast is required 2872 r_write_fsm = WRITE_BC_TRT_LOCK; 2873 2874 #if DEBUG_MEMC_WRITE 2875 if(m_debug) 2876 std::cout << " <MEMC " << name() << " WRITE_DIR_READ>" 2877 << " Read the cache to complete local buffer" << std::endl; 2878 #endif 2879 break; 2880 } 2881 2881 break; 2882 } 2882 2883 /////////////////// 2883 case WRITE_DIR_HIT: 2884 { 2885 // update the cache directory 2886 // update directory with Dirty bit 2887 DirectoryEntry entry; 2888 entry.valid = true; 2889 entry.dirty = true; 2890 entry.tag = r_write_tag.read(); 2891 entry.is_cnt = r_write_is_cnt.read(); 2892 entry.lock = r_write_lock.read(); 2893 entry.owner.srcid = r_write_copy.read(); 2894 #if L1_MULTI_CACHE 2895 entry.owner.cache_id = r_write_copy_cache.read(); 2896 #endif 2897 entry.owner.inst = r_write_copy_inst.read(); 2898 entry.count = r_write_count.read(); 2899 entry.ptr = r_write_ptr.read(); 2900 2901 size_t set = m_y[(addr_t)(r_write_address.read())]; 2902 size_t way = r_write_way.read(); 2903 2904 // update directory 2905 m_cache_directory.write(set, way, entry); 2906 2907 // owner is true when the the first registered copy is the writer itself 2908 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 2909 #if L1_MULTI_CACHE 2910 and(r_write_copy_cache.read() ==r_write_pktid.read()) 2911 #endif 2912 ) and not r_write_copy_inst.read()); 2913 2914 // no_update is true when there is no need for coherence transaction 2915 // (tests for sc requests) 2916 bool no_update = ( (r_write_count.read() == 0) or 2884 case WRITE_DIR_HIT: // update the cache directory with Dirty bit 2885 // and update data cache 2886 { 2887 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2888 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 2889 2890 DirectoryEntry entry; 2891 entry.valid = true; 2892 entry.dirty = true; 2893 entry.tag = r_write_tag.read(); 2894 entry.is_cnt = r_write_is_cnt.read(); 2895 entry.lock = r_write_lock.read(); 2896 entry.owner.srcid = r_write_copy.read(); 2897 entry.owner.inst = r_write_copy_inst.read(); 2898 entry.count = r_write_count.read(); 2899 entry.ptr = r_write_ptr.read(); 2900 2901 size_t set = m_y[(addr_t)(r_write_address.read())]; 2902 size_t way = r_write_way.read(); 2903 2904 // update directory 2905 m_cache_directory.write(set, way, entry); 2906 2907 // owner is true when the the first registered copy is the writer itself 2908 bool owner = ( (r_write_copy.read() == r_write_srcid.read()) 2909 and not r_write_copy_inst.read() ); 2910 2911 // no_update is true when there is no need for coherence transaction 2912 bool no_update = ( (r_write_count.read() == 0) or 2917 2913 (owner and (r_write_count.read() ==1) and 2918 2914 (r_write_pktid.read() != TYPE_SC))); 2919 2915 2920 // write data in the cache if no coherence transaction 2921 if(no_update) 2922 { 2923 for(size_t word=0 ; word<m_words ; 
word++) 2924 { 2925 m_cache_data.write(way, set, word, r_write_data[word].read(), r_write_be[word].read()); 2926 2927 if(m_monitor_ok) 2928 { 2929 addr_t address = (r_write_address.read() & ~(addr_t) 0x3F) | word<<2; 2930 check_monitor( address, r_write_data[word].read(), false); 2931 } 2932 } 2933 } 2934 2935 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2936 { 2937 r_write_count = r_write_count.read() - 1; 2938 } 2939 2940 if(no_update) 2941 // Write transaction completed 2942 { 2943 r_write_fsm = WRITE_RSP; 2944 } 2945 else 2946 // coherence update required 2947 { 2948 if(!r_write_to_cc_send_multi_req.read() and 2949 !r_write_to_cc_send_brdcast_req.read()) 2950 { 2951 r_write_fsm = WRITE_UPT_LOCK; 2952 } 2953 else 2954 { 2955 r_write_fsm = WRITE_WAIT; 2956 } 2957 } 2916 // write data in the cache if no coherence transaction 2917 if(no_update) 2918 { 2919 for(size_t word=0 ; word<m_words ; word++) 2920 { 2921 m_cache_data.write( way, 2922 set, 2923 word, 2924 r_write_data[word].read(), 2925 r_write_be[word].read()); 2926 } 2927 } 2928 2929 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2930 { 2931 r_write_count = r_write_count.read() - 1; 2932 } 2933 2934 if(no_update) // Write transaction completed 2935 { 2936 r_write_fsm = WRITE_RSP; 2937 } 2938 else // coherence update required 2939 { 2940 if(!r_write_to_cc_send_multi_req.read() and 2941 !r_write_to_cc_send_brdcast_req.read()) 2942 { 2943 r_write_fsm = WRITE_UPT_LOCK; 2944 } 2945 else 2946 { 2947 r_write_fsm = WRITE_WAIT; 2948 } 2949 } 2958 2950 2959 2951 #if DEBUG_MEMC_WRITE 2960 2952 if(m_debug) 2961 2953 { 2962 if(no_update) 2963 { 2964 std::cout << " <MEMC " << name() 2965 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 2966 << std::endl; 2967 } 2968 else 2969 { 2970 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2971 << " is_cnt = " << r_write_is_cnt.read() 2972 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2973 if(owner) std::cout << " ... but the first copy is the writer" << std::endl; 2974 } 2954 if(no_update) 2955 { 2956 std::cout << " <MEMC " << name() 2957 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl; 2975 2958 } 2976 #endif 2977 break; 2959 else 2960 { 2961 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2962 << " is_cnt = " << r_write_is_cnt.read() 2963 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2964 if(owner) std::cout << " ... 
but the first copy is the writer" << std::endl; 2965 } 2966 } 2967 #endif 2968 break; 2978 2969 } 2979 2970 //////////////////// 2980 2971 case WRITE_UPT_LOCK: // Try to register the update request in UPT 2981 2972 { 2982 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2983 { 2984 bool wok = false; 2985 size_t index = 0; 2986 size_t srcid = r_write_srcid.read(); 2987 size_t trdid = r_write_trdid.read(); 2988 size_t pktid = r_write_pktid.read(); 2989 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2990 size_t nb_copies = r_write_count.read(); 2991 size_t set = m_y[(addr_t)(r_write_address.read())]; 2992 size_t way = r_write_way.read(); 2993 2994 wok = m_upt.set(true, // it's an update transaction 2995 false, // it's not a broadcast 2996 true, // response required 2997 false, // no acknowledge required 2998 srcid, 2999 trdid, 3000 pktid, 3001 nline, 3002 nb_copies, 3003 index); 3004 if(wok) // write data in cache 3005 { 3006 for(size_t word=0 ; word<m_words ; word++) 3007 { 3008 m_cache_data.write(way, 3009 set, 3010 word, 3011 r_write_data[word].read(), 3012 r_write_be[word].read()); 3013 3014 if(m_monitor_ok) 2973 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2974 { 2975 bool wok = false; 2976 size_t index = 0; 2977 size_t srcid = r_write_srcid.read(); 2978 size_t trdid = r_write_trdid.read(); 2979 size_t pktid = r_write_pktid.read(); 2980 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2981 size_t nb_copies = r_write_count.read(); 2982 size_t set = m_y[(addr_t)(r_write_address.read())]; 2983 size_t way = r_write_way.read(); 2984 2985 wok = m_upt.set( true, // it's an update transaction 2986 false, // it's not a broadcast 2987 true, // response required 2988 false, // no acknowledge required 2989 srcid, 2990 trdid, 2991 pktid, 2992 nline, 2993 nb_copies, 2994 index); 2995 2996 if( wok ) // write data in cache 3015 2997 { 3016 addr_t address = (r_write_address.read() & ~(addr_t) 0x3F) | word<<2; 3017 check_monitor( address, r_write_data[word].read(), false); 2998 for(size_t word=0 ; word<m_words ; word++) 2999 { 3000 m_cache_data.write( way, 3001 set, 3002 word, 3003 r_write_data[word].read(), 3004 r_write_be[word].read()); 3005 } 3018 3006 } 3019 }3020 }3021 3007 3022 3008 #if DEBUG_MEMC_WRITE 3023 if(m_debug )3009 if(m_debug and wok) 3024 3010 { 3025 if(wok) 3026 { 3027 std::cout << " <MEMC " << name() 3028 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3029 << " nb_copies = " << r_write_count.read() << std::endl; 3030 } 3011 std::cout << " <MEMC " << name() 3012 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3013 << " nb_copies = " << r_write_count.read() << std::endl; 3031 3014 } 3032 3015 #endif 3033 r_write_upt_index = index; 3034 // releases the lock protecting UPT and the DIR if no entry... 3035 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3036 else r_write_fsm = WRITE_WAIT; 3037 m_cpt_write_fsm_n_upt_lock++; 3038 } 3039 3040 m_cpt_write_fsm_upt_lock++; 3041 3042 break; 3016 r_write_upt_index = index; 3017 // releases the lock protecting UPT and the DIR if no entry... 
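The UPT entry registered just above is the one later consumed by the MULTI_ACK FSM described earlier. As a rough model only (this is not the real UpdateTab API), its lifetime can be summarised as follows: allocated here with the number of expected acknowledges, decremented once per UPDATE acknowledge, and freed when the last acknowledge allows the delayed write response to be sent.

#include <cstddef>

// toy model of one UPT slot (simplified fields and behaviour)
struct upt_slot_model
{
    bool   valid;
    size_t srcid, trdid, pktid;     // transaction identifiers for the delayed response
    size_t count;                   // number of acknowledges still expected

    void set( size_t s, size_t t, size_t p, size_t nb_copies )
    {
        valid = true; srcid = s; trdid = t; pktid = p; count = nb_copies;
    }

    bool ack()                      // returns true when this was the last acknowledge
    {
        count = count - 1;
        if ( count != 0 ) return false;
        valid = false;              // entry cleared: TGT_RSP can now answer the writer
        return true;
    }
};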
3018 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3019 else r_write_fsm = WRITE_WAIT; 3020 } 3021 break; 3043 3022 } 3044 3023 … … 3086 3065 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 3087 3066 3088 if((r_write_copy.read() != r_write_srcid.read()) or(r_write_pktid.read() == TYPE_SC) or 3089 #if L1_MULTI_CACHE 3090 (r_write_copy_cache.read() != r_write_pktid.read()) or 3091 #endif 3092 r_write_copy_inst.read()) 3067 if( (r_write_copy.read() != r_write_srcid.read()) or 3068 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 3093 3069 { 3094 3070 // put the first srcid in the fifo … … 3096 3072 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3097 3073 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3098 #if L1_MULTI_CACHE3099 write_to_cc_send_fifo_cache_id= r_write_copy_cache.read();3100 #endif3101 3074 if(r_write_count.read() == 1) 3102 3075 { … … 3149 3122 bool dec_upt_counter; 3150 3123 3151 if(((entry.owner.srcid != r_write_srcid.read()) or (r_write_pktid.read() == TYPE_SC)) or 3152 #if L1_MULTI_CACHE 3153 (entry.owner.cache_id != r_write_pktid.read()) or 3154 #endif 3155 entry.owner.inst) // put the next srcid in the fifo 3124 // put the next srcid in the fifo 3125 if( (entry.owner.srcid != r_write_srcid.read()) or 3126 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 3156 3127 { 3157 3128 dec_upt_counter = false; … … 3159 3130 write_to_cc_send_fifo_inst = entry.owner.inst; 3160 3131 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3161 #if L1_MULTI_CACHE3162 write_to_cc_send_fifo_cache_id = entry.owner.cache_id;3163 #endif3164 3132 3165 3133 #if DEBUG_MEMC_WRITE … … 3231 3199 3232 3200 /////////////// 3233 case WRITE_RSP: 3234 { 3235 // Post a request to TGT_RSP FSM to acknowledge the write 3236 // In order to increase the Write requests throughput, 3237 // we don't wait to return in the IDLE state to consume 3238 // a new request in the write FIFO 3239 3201 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 3202 // In order to increase the Write requests throughput, 3203 // we don't wait to return in the IDLE state to consume 3204 // a new request in the write FIFO 3205 { 3240 3206 if(!r_write_to_tgt_rsp_req.read()) 3241 3207 { … … 3330 3296 bool hit_write = m_trt.hit_write(m_nline[addr]); 3331 3297 #endif 3332 bool wok = !m_trt.full(wok_index);3298 bool wok = not m_trt.full(wok_index); 3333 3299 3334 3300 if(hit_read) // register the modified data in TRT … … 3418 3384 data_vector.push_back(r_write_data[i]); 3419 3385 } 3420 m_trt.write_data_mask( r_write_trt_index.read(),3421 3422 data_vector);3386 m_trt.write_data_mask( r_write_trt_index.read(), 3387 be_vector, 3388 data_vector ); 3423 3389 r_write_fsm = WRITE_RSP; 3424 3390 … … 3430 3396 break; 3431 3397 } 3432 3433 3398 ///////////////////////// 3434 3399 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3435 3400 { 3436 if( !r_write_to_ixr_cmd_req)3401 if( not r_write_to_ixr_cmd_req.read() ) 3437 3402 { 3438 3403 r_write_to_ixr_cmd_req = true; 3439 r_write_to_ixr_cmd_write = false; 3440 r_write_to_ixr_cmd_nline = m_nline[(addr_t)(r_write_address.read())]; 3441 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 3404 r_write_to_ixr_cmd_put = false; 3405 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3442 3406 r_write_fsm = WRITE_RSP; 3443 3407 … … 3449 3413 break; 3450 3414 } 3451 3452 3415 /////////////////////// 3453 case WRITE_BC_TRT_LOCK: // Check TRT not full 3454 { 3455 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3456 { 
3457 size_t wok_index = 0; 3458 bool wok = !m_trt.full(wok_index); 3459 if(wok) // set a new entry in TRT 3460 { 3461 r_write_trt_index = wok_index; 3462 r_write_fsm = WRITE_BC_IVT_LOCK; 3463 } 3464 else // wait an empty entry in TRT 3465 { 3466 r_write_fsm = WRITE_WAIT; 3467 } 3416 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3417 // the cache line must be erased in mem-cache, and written 3418 // into XRAM. we read the cache and complete the buffer 3419 { 3420 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3421 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3422 3423 // update local buffer 3424 size_t set = m_y[(addr_t)(r_write_address.read())]; 3425 size_t way = r_write_way.read(); 3426 for(size_t word=0 ; word<m_words ; word++) 3427 { 3428 data_t mask = 0; 3429 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3430 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3431 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3432 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3433 3434 // complete only if mask is not null (for energy consumption) 3435 r_write_data[word] = (r_write_data[word].read() & mask) | 3436 (m_cache_data.read(way, set, word) & ~mask); 3437 } // end for 3438 3439 r_write_fsm = WRITE_BC_TRT_LOCK; 3440 3441 #if DEBUG_MEMC_WRITE 3442 if(m_debug) 3443 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3444 << " Read the cache to complete local buffer" << std::endl; 3445 #endif 3446 break; 3447 } 3448 /////////////////////// 3449 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3450 { 3451 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3452 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3453 3454 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3455 { 3456 size_t wok_index = 0; 3457 bool wok = not m_trt.full(wok_index); 3458 if( wok ) 3459 { 3460 r_write_trt_index = wok_index; 3461 r_write_fsm = WRITE_BC_IVT_LOCK; 3462 } 3463 else // wait an empty slot in TRT 3464 { 3465 r_write_fsm = WRITE_WAIT; 3466 } 3468 3467 3469 3468 #if DEBUG_MEMC_WRITE … … 3479 3478 break; 3480 3479 } 3481 3482 3480 ////////////////////// 3483 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 3484 { 3485 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3486 { 3487 bool wok = false; 3488 size_t index = 0; 3489 size_t srcid = r_write_srcid.read(); 3490 size_t trdid = r_write_trdid.read(); 3491 size_t pktid = r_write_pktid.read(); 3492 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3493 size_t nb_copies = r_write_count.read(); 3494 3495 wok = m_ivt.set(false, // it's an inval transaction 3496 true, // it's a broadcast 3497 true, // response required 3498 false, // no acknowledge required 3499 srcid, 3500 trdid, 3501 pktid, 3502 nline, 3503 nb_copies, 3504 index); 3481 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3482 { 3483 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3484 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3485 3486 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3487 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3488 3489 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3490 { 3491 bool wok = false; 3492 size_t index = 0; 3493 size_t srcid = r_write_srcid.read(); 3494 size_t trdid = r_write_trdid.read(); 3495 size_t pktid = r_write_pktid.read(); 3496 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3497 size_t nb_copies = r_write_count.read(); 3498 3499 
wok = m_ivt.set(false, // it's an inval transaction 3500 true, // it's a broadcast 3501 true, // response required 3502 false, // no acknowledge required 3503 srcid, 3504 trdid, 3505 pktid, 3506 nline, 3507 nb_copies, 3508 index); 3505 3509 #if DEBUG_MEMC_WRITE 3506 3510 if( m_debug and wok ) … … 3508 3512 << " / nb_copies = " << r_write_count.read() << std::endl; 3509 3513 #endif 3510 r_write_upt_index = index; 3511 3512 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL; 3513 else r_write_fsm = WRITE_WAIT; 3514 m_cpt_write_fsm_n_upt_lock++; 3515 } 3516 3517 m_cpt_write_fsm_upt_lock++; 3518 3519 break; 3520 } 3521 3514 r_write_upt_index = index; 3515 3516 if( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3517 else r_write_fsm = WRITE_WAIT; 3518 } 3519 break; 3520 } 3522 3521 //////////////////////// 3523 case WRITE_BC_DIR_INVAL: 3524 { 3525 // Register a put transaction to XRAM in TRT 3526 // and invalidate the line in directory 3527 if((r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) or 3528 (r_alloc_ivt_fsm.read() != ALLOC_IVT_WRITE) or 3529 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE)) 3530 { 3531 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; 3532 std::cout << "bad TRT, DIR, or IVT allocation" << std::endl; 3533 exit(0); 3534 } 3535 3536 // register a write request to XRAM in TRT 3537 m_trt.set(r_write_trt_index.read(), 3538 false, // write request to XRAM 3539 m_nline[(addr_t)(r_write_address.read())], 3540 0, 3541 0, 3542 0, 3543 false, // not a processor read 3544 0, // not a single word 3545 0, // word index 3546 std::vector<be_t> (m_words,0), 3547 std::vector<data_t> (m_words,0)); 3548 3549 // invalidate directory entry 3550 DirectoryEntry entry; 3551 entry.valid = false; 3552 entry.dirty = false; 3553 entry.tag = 0; 3554 entry.is_cnt = false; 3555 entry.lock = false; 3556 entry.owner.srcid = 0; 3557 #if L1_MULTI_CACHE 3558 entry.owner.cache_id= 0; 3559 #endif 3560 entry.owner.inst = false; 3561 entry.ptr = 0; 3562 entry.count = 0; 3563 size_t set = m_y[(addr_t)(r_write_address.read())]; 3564 size_t way = r_write_way.read(); 3565 3566 m_cache_directory.write(set, way, entry); 3522 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3523 // and invalidate the line in directory 3524 { 3525 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3526 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3527 3528 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3529 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3530 3531 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3532 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3533 3534 // register PUT request in TRT 3535 std::vector<data_t> data_vector; 3536 data_vector.clear(); 3537 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3538 m_trt.set( r_write_trt_index.read(), 3539 false, // PUT request 3540 m_nline[(addr_t)(r_write_address.read())], 3541 0, // unused 3542 0, // unused 3543 0, // unused 3544 false, // not a processor read 3545 0, // unused 3546 0, // unused 3547 std::vector<be_t> (m_words,0), 3548 data_vector ); 3549 3550 // invalidate directory entry 3551 DirectoryEntry entry; 3552 entry.valid = false; 3553 entry.dirty = false; 3554 entry.tag = 0; 3555 entry.is_cnt = false; 3556 entry.lock = false; 3557 entry.owner.srcid = 0; 3558 entry.owner.inst = false; 3559 entry.ptr = 0; 3560 entry.count = 0; 3561 size_t set = m_y[(addr_t)(r_write_address.read())]; 3562 size_t way = r_write_way.read(); 3563 3564 
m_cache_directory.write(set, way, entry); 3567 3565 3568 3566 #if DEBUG_MEMC_WRITE 3569 3567 if(m_debug) 3570 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ ="3571 << r_write_address.read() << " / register the put transaction in TRT:"<< std::endl;3572 #endif 3573 r_write_fsm = WRITE_BC_CC_SEND;3574 break;3568 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3569 << " address = " << r_write_address.read() << std::endl; 3570 #endif 3571 r_write_fsm = WRITE_BC_CC_SEND; 3572 break; 3575 3573 } 3576 3574 … … 3578 3576 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3579 3577 { 3580 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read())3581 {3582 r_write_to_cc_send_multi_req = false;3583 r_write_to_cc_send_brdcast_req = true;3584 r_write_to_cc_send_trdid = r_write_upt_index.read();3585 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())];3586 r_write_to_cc_send_index = 0;3587 r_write_to_cc_send_count = 0;3588 3589 for(size_t i=0; i<m_words ; i++)3590 {3591 r_write_to_cc_send_be[i]=0;3592 r_write_to_cc_send_data[i] = 0;3593 }3594 r_write_fsm = WRITE_BC_XRAM_REQ;3578 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3579 { 3580 r_write_to_cc_send_multi_req = false; 3581 r_write_to_cc_send_brdcast_req = true; 3582 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3583 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3584 r_write_to_cc_send_index = 0; 3585 r_write_to_cc_send_count = 0; 3586 3587 for(size_t i=0; i<m_words ; i++) // what is this for loop for? (AG) 3588 { 3589 r_write_to_cc_send_be[i]=0; 3590 r_write_to_cc_send_data[i] = 0; 3591 } 3592 r_write_fsm = WRITE_BC_XRAM_REQ; 3595 3593 3596 3594 #if DEBUG_MEMC_WRITE … … 3599 3597 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3600 3598 #endif 3601 }3602 break;3599 } 3600 break; 3603 3601 } 3604 3602 3605 3603 /////////////////////// 3606 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 3607 { 3608 if(!r_write_to_ixr_cmd_req) 3609 { 3610 r_write_to_ixr_cmd_req = true; 3611 r_write_to_ixr_cmd_write = true; 3612 r_write_to_ixr_cmd_nline = m_nline[(addr_t)(r_write_address.read())]; 3613 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 3614 3615 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 3616 3617 r_write_fsm = WRITE_IDLE; 3604 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3605 { 3606 if( not r_write_to_ixr_cmd_req.read() ) 3607 { 3608 r_write_to_ixr_cmd_req = true; 3609 r_write_to_ixr_cmd_put = true; 3610 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3611 r_write_fsm = WRITE_IDLE; 3618 3612 3619 3613 #if DEBUG_MEMC_WRITE … … 3622 3616 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3623 3617 #endif 3624 }3625 break;3618 } 3619 break; 3626 3620 } 3627 3621 } // end switch r_write_fsm … … 3631 3625 /////////////////////////////////////////////////////////////////////// 3632 3626 // The IXR_CMD fsm controls the command packets to the XRAM : 3633 // It handles requests from the READ, WRITE, CAS, XRAM_RSP FSMs 3634 // with a round-robin priority. 3627 // It handles requests from 5 FSMs with a round-robin priority: 3628 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3635 3629 // 3636 // - It sends a single flit VCI read request to the XRAM in case of MISS 3637 posted by the READ, WRITE or CAS FSMs : the TRDID field contains 
3638 // the Transaction Tab index. 3639 // The VCI response is a multi-flit packet : the N cells contain 3640 // the N data words. 3630 // - It sends a single flit VCI read to the XRAM in case of 3631 // GET request posted by the READ, WRITE or CAS FSMs. 3632 // - It sends a multi-flit VCI write in case of PUT request posted by 3633 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 3641 3634 // 3642 // - It sends a multi-flit VCI write when the XRAM_RSP FSM, WRITE FSM 3643 // or CAS FSM request to save a dirty line to the XRAM. 3644 // The VCI response is a single flit packet. 3635 // For each client, there is three steps: 3636 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3637 // - IXR_CMD_*_TRT : access to TRT for address and data 3638 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3639 // 3640 // The address and data to be written (for a PUT) are stored in TRT. 3641 // The trdid field contains always the TRT entry index. 3645 3642 //////////////////////////////////////////////////////////////////////// 3643 3644 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3646 3645 3647 3646 switch(r_ixr_cmd_fsm.read()) 3648 3647 { 3649 /////////////////////// /3648 /////////////////////// 3650 3649 case IXR_CMD_READ_IDLE: 3651 if (r_write_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_WRITE;3652 else if(r_cas_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_CAS;3653 else if(r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM;3650 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3651 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3652 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3654 3653 #if ODCCP_NON_INCLUSIVE 3655 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA; 3654 else if(r_cleanup_to_ixr_cmd_req.read()) 3655 { 3656 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3657 r_ixr_cmd_word = 0; 3658 } 3656 3659 #else 3657 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_TRT_LOCK; 3658 #endif 3659 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3660 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3661 #endif 3662 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3663 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3660 3664 break; 3661 3665 //////////////////////// 3662 3666 case IXR_CMD_WRITE_IDLE: 3663 if (r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS;3664 else if(r_xram_rsp_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_XRAM;3667 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3668 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3665 3669 #if ODCCP_NON_INCLUSIVE 3666 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA; 3670 else if(r_cleanup_to_ixr_cmd_req.read()) 3671 { 3672 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3673 r_ixr_cmd_word = 0; 3674 } 3667 3675 #else 3668 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_TRT_LOCK; 3669 #endif 3670 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3671 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3676 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3677 #endif 3678 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3679 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3680 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3672 3681 break; 3673 
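The header comment above describes a rotating (round-robin) allocation of the IXR_CMD FSM among its five client FSMs: each per-client IDLE state tests the other clients' request flags first and its own client last, so the client that was just served gets the lowest priority on the next arbitration. A minimal standalone sketch of that selection policy, using invented enum and function names and leaving out the extra CLEANUP client handled by the ODCCP branches (it is an illustration only, not part of the changeset), could look like this:

#include <cstddef>

// One identifier per client FSM, in the scan order used by the IDLE states.
enum IxrClient { IXR_READ = 0, IXR_WRITE, IXR_CAS, IXR_XRAM, IXR_CONFIG, IXR_NONE };

// 'req[c]' is true when client 'c' has a pending command; 'last' is the client
// that was granted most recently. The scan starts just after 'last', so the
// client served last is re-examined last, which gives the round-robin property.
IxrClient ixr_round_robin(const bool req[5], IxrClient last)
{
    for (std::size_t i = 1; i <= 5; ++i)
    {
        std::size_t candidate = (static_cast<std::size_t>(last) + i) % 5;
        if (req[candidate]) return static_cast<IxrClient>(candidate);
    }
    return IXR_NONE; // no pending request: the FSM stays in its IDLE state
}

Each IXR_CMD_*_IDLE state in the actual FSM corresponds to one fixed starting point of this scan, with the scan order realized by the chain of else-if tests.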
////////////////////// //3682 ////////////////////// 3674 3683 case IXR_CMD_CAS_IDLE: 3675 if (r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM;3684 if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3676 3685 #if ODCCP_NON_INCLUSIVE 3677 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA; 3686 else if(r_cleanup_to_ixr_cmd_req.read()) 3687 { 3688 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3689 r_ixr_cmd_word = 0; 3690 } 3678 3691 #else 3679 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_TRT_LOCK; 3680 #endif 3681 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3682 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3683 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3692 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3693 #endif 3694 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3695 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3696 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3697 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3684 3698 break; 3685 /////////////////////// /3699 /////////////////////// 3686 3700 case IXR_CMD_XRAM_IDLE: 3687 3701 #if ODCCP_NON_INCLUSIVE 3688 if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA; 3702 if(r_cleanup_to_ixr_cmd_req.read()) 3703 { 3704 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3705 r_ixr_cmd_word = 0; 3706 } 3689 3707 #else 3690 if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_TRT_LOCK; 3691 #endif 3692 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3693 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3694 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3695 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3708 if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3709 #endif 3710 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3711 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3712 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3713 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3714 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3696 3715 break; 3697 3716 //////////////////////// 3698 3717 case IXR_CMD_CLEANUP_IDLE: 3699 if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3700 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3701 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3702 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3718 if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3719 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3720 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3721 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3722 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3703 3723 #if ODCCP_NON_INCLUSIVE 3704 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA; 3724 else if(r_cleanup_to_ixr_cmd_req.read()) 3725 { 3726 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3727 r_ixr_cmd_word = 0; 3728 } 3705 3729 #else 3706 else if(r_cleanup_to_ixr_cmd_req ) r_ixr_cmd_fsm = IXR_CMD_TRT_LOCK;3730 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3707 3731 #endif 3708 3732 break; 3709 3733 
///////////////////////// 3734 case IXR_CMD_CONFIG_IDLE: 3735 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3736 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3737 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3738 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3739 #if ODCCP_NON_INCLUSIVE 3740 else if(r_cleanup_to_ixr_cmd_req.read()) 3741 { 3742 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3743 r_ixr_cmd_word = 0; 3744 } 3745 #else 3746 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 3747 #endif 3748 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3749 break; 3750 3751 3752 ////////////////////// 3753 case IXR_CMD_READ_TRT: // access TRT for a GET 3754 { 3755 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3756 { 3757 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 3758 r_ixr_cmd_address = entry.nline * (m_words<<2); 3759 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3760 r_ixr_cmd_get = true; 3761 r_ixr_cmd_word = 0; 3762 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3763 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3764 3765 #if DEBUG_MEMC_IXR_CMD 3766 if(m_debug) 3767 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 3768 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3769 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3770 #endif 3771 } 3772 break; 3773 } 3774 /////////////////////// 3775 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3776 { 3777 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3778 { 3779 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 3780 r_ixr_cmd_address = entry.nline * (m_words<<2); 3781 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3782 r_ixr_cmd_get = entry.xram_read; 3783 r_ixr_cmd_word = 0; 3784 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3785 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3786 3787 #if DEBUG_MEMC_IXR_CMD 3788 if(m_debug) 3789 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3790 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3791 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3792 #endif 3793 } 3794 break; 3795 } 3710 3796 ///////////////////// 3711 case IXR_CMD_TRT_LOCK: 3712 { 3713 TransactionTabEntry entry; 3714 3715 if(r_alloc_trt_fsm.read() != ALLOC_TRT_IXR_CMD) break; 3716 entry.copy( m_trt.read(r_cleanup_to_ixr_cmd_trdid.read())); 3717 for(size_t i=0; i < m_words; i++) 3718 { 3719 r_ixr_cmd_data[i] = entry.wdata[i]; 3720 } 3721 3722 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA; 3723 break; 3724 } 3725 3726 ////////////////// // send a get from READ FSM 3727 case IXR_CMD_READ: 3728 { 3729 if(p_vci_ixr.cmdack) 3730 { 3731 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3732 r_read_to_ixr_cmd_req = false; 3797 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3798 { 3799 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3800 { 3801 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 3802 r_ixr_cmd_address = entry.nline * (m_words<<2); 3803 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3804 r_ixr_cmd_get = entry.xram_read; 3805 r_ixr_cmd_word = 0; 3806 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3807 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3733 3808 3734 3809 #if DEBUG_MEMC_IXR_CMD 3735 3810 if(m_debug) 3736 std::cout 
<< " <MEMC " << name() << " IXR_CMD_READ>" 3737 << " Send a get request to xram / address = " << std::hex 3738 << (addr_t)(r_read_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3739 #endif 3740 } 3741 break; 3742 } 3743 /////////////////// 3744 case IXR_CMD_WRITE: // send a put or get from WRITE FSM 3745 { 3746 if(p_vci_ixr.cmdack) 3747 { 3748 if(r_write_to_ixr_cmd_write.read()) // PUT 3749 { 3750 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3751 { 3752 r_ixr_cmd_cpt = 0; 3753 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3754 r_write_to_ixr_cmd_req = false; 3755 } 3756 else 3757 { 3758 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3759 } 3811 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3812 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3813 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3814 #endif 3815 } 3816 break; 3817 } 3818 ////////////////////// 3819 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3820 { 3821 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3822 { 3823 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 3824 r_ixr_cmd_address = entry.nline * (m_words<<2); 3825 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3826 r_ixr_cmd_get = false; 3827 r_ixr_cmd_word = 0; 3828 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3829 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3760 3830 3761 3831 #if DEBUG_MEMC_IXR_CMD 3762 3832 if(m_debug) 3763 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE>" 3764 << " Send a put request to xram / address = " << std::hex 3765 << (addr_t)((r_write_to_ixr_cmd_nline.read() * m_words + 3766 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3767 #endif 3768 } 3769 else // GET 3770 { 3771 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3772 r_write_to_ixr_cmd_req = false; 3833 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3834 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3835 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3836 #endif 3837 } 3838 break; 3839 } 3840 ////////////////////// 3841 case IXR_CMD_CLEANUP_TRT: // access TRT for a PUT 3842 { 3843 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3844 { 3845 3846 TransactionTabEntry entry = m_trt.read( r_cleanup_to_ixr_cmd_index.read() ); 3847 r_ixr_cmd_address = entry.nline * (m_words<<2); 3848 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 3849 r_ixr_cmd_get = false; 3850 r_ixr_cmd_word = 0; 3851 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 3852 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3773 3853 3774 3854 #if DEBUG_MEMC_IXR_CMD 3775 3855 if(m_debug) 3776 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE>" 3777 << " Send a get request to xram / address = " << std::hex 3778 << (addr_t)(r_write_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3779 #endif 3780 } 3781 } 3782 break; 3783 } 3784 ///////////////// 3785 case IXR_CMD_CAS: // send a put or get command from CAS FSM 3786 { 3787 if(p_vci_ixr.cmdack) 3788 { 3789 if(r_cas_to_ixr_cmd_write.read()) // PUT 3790 { 3791 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3792 { 3793 r_ixr_cmd_cpt = 0; 3794 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3795 r_cas_to_ixr_cmd_req = false; 3796 } 3797 else 3798 { 3799 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3800 } 3856 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 3857 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 3858 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3859 #endif 
3860 } 3861 break; 3862 } 3863 //////////////////////// 3864 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3865 { 3866 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3867 { 3868 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 3869 r_ixr_cmd_address = entry.nline * (m_words<<2); 3870 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3871 r_ixr_cmd_get = false; 3872 r_ixr_cmd_word = 0; 3873 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3874 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3801 3875 3802 3876 #if DEBUG_MEMC_IXR_CMD 3803 3877 if(m_debug) 3804 std::cout << " <MEMC " << name() << " IXR_CMD_CAS>" 3805 << " Send a put request to xram / address = " << std::hex 3806 << (addr_t)( (r_cas_to_ixr_cmd_nline.read() * m_words + 3807 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3808 #endif 3809 } 3810 else // GET 3811 { 3812 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3813 r_cas_to_ixr_cmd_req = false; 3878 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3879 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3880 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3881 #endif 3882 } 3883 break; 3884 } 3885 3886 /////////////////////// 3887 case IXR_CMD_READ_SEND: // send a get from READ FSM 3888 { 3889 if(p_vci_ixr.cmdack) 3890 { 3891 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3892 r_read_to_ixr_cmd_req = false; 3814 3893 3815 3894 #if DEBUG_MEMC_IXR_CMD 3816 3895 if(m_debug) 3817 std::cout << " <MEMC " << name() << " IXR_CMD_CAS>" 3818 << " Send a get request to xram / address = " << std::hex 3819 << (addr_t)(r_cas_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3820 #endif 3821 } 3822 } 3823 break; 3824 } 3825 ////////////////// 3826 case IXR_CMD_XRAM: // send a put from XRAM_RSP FSM 3827 { 3828 if(p_vci_ixr.cmdack) 3829 { 3830 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3831 { 3832 r_ixr_cmd_cpt = 0; 3833 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3834 r_xram_rsp_to_ixr_cmd_req = false; 3835 } 3836 else 3837 { 3838 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3839 } 3896 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3897 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3898 #endif 3899 } 3900 break; 3901 } 3902 //////////////////////// 3903 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3904 { 3905 if(p_vci_ixr.cmdack) 3906 { 3907 if(r_write_to_ixr_cmd_put.read()) // PUT 3908 { 3909 if(r_ixr_cmd_word.read() == (m_words - 2)) 3910 { 3911 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3912 r_write_to_ixr_cmd_req = false; 3913 } 3914 else 3915 { 3916 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3917 } 3840 3918 3841 3919 #if DEBUG_MEMC_IXR_CMD 3842 3920 if(m_debug) 3843 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM>" 3844 << " Send a put request to xram / address = " << std::hex 3845 << (addr_t)( (r_xram_rsp_to_ixr_cmd_nline.read() * m_words + 3846 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3847 #endif 3848 } 3849 break; 3921 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3922 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3923 #endif 3924 } 3925 else // GET 3926 { 3927 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3928 r_write_to_ixr_cmd_req = false; 3929 3930 #if DEBUG_MEMC_IXR_CMD 3931 if(m_debug) 3932 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3933 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << 
std::endl; 3934 #endif 3935 } 3936 } 3937 break; 3938 } 3939 ////////////////////// 3940 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3941 { 3942 if(p_vci_ixr.cmdack) 3943 { 3944 if(r_cas_to_ixr_cmd_put.read()) // PUT 3945 { 3946 if(r_ixr_cmd_word.read() == (m_words - 2)) 3947 { 3948 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3949 r_cas_to_ixr_cmd_req = false; 3950 } 3951 else 3952 { 3953 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3954 } 3955 3956 #if DEBUG_MEMC_IXR_CMD 3957 if(m_debug) 3958 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3959 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3960 #endif 3961 } 3962 else // GET 3963 { 3964 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3965 r_cas_to_ixr_cmd_req = false; 3966 3967 #if DEBUG_MEMC_IXR_CMD 3968 if(m_debug) 3969 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3970 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3971 #endif 3972 } 3973 } 3974 break; 3975 } 3976 /////////////////////// 3977 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3978 { 3979 if(p_vci_ixr.cmdack.read()) 3980 { 3981 if(r_ixr_cmd_word.read() == (m_words - 2)) 3982 { 3983 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3984 r_xram_rsp_to_ixr_cmd_req = false; 3985 } 3986 else 3987 { 3988 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3989 } 3990 #if DEBUG_MEMC_IXR_CMD 3991 if(m_debug) 3992 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3993 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3994 #endif 3995 } 3996 break; 3850 3997 } 3851 3998 3852 3999 //////////////////////// 3853 case IXR_CMD_CLEANUP_DATA: // send a put command to XRAM 3854 if(p_vci_ixr.cmdack) 3855 { 3856 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3857 { 3858 r_ixr_cmd_cpt = 0; 4000 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4001 { 4002 if(p_vci_ixr.cmdack.read()) 4003 { 4004 /*ODCCP*/ //std::cout << "IXR_CMD_CLEANUP_DATA_SEND STATE at cycle : " << std::dec << m_cpt_cycles << std::endl; 4005 if(r_ixr_cmd_word.read() == (m_words - 2)) 4006 { 4007 /*ODCCP*/ //std::cout << "IXR_CMD_CLEANUP_DATA_SEND GO TO IXR_CMD_CLEANUP_IDLE" << std::endl; 3859 4008 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 3860 4009 r_cleanup_to_ixr_cmd_req = false; 4010 //r_ixr_cmd_word = 0; 3861 4011 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 3862 4012 } 3863 4013 else 3864 4014 { 3865 r_ixr_cmd_ cpt = r_ixr_cmd_cpt.read() + 2;4015 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3866 4016 } 3867 4017 … … 3869 4019 if(m_debug) 3870 4020 { 3871 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA > Send a put request to xram" << std::endl;4021 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::endl; 3872 4022 } 3873 4023 #endif 3874 4024 } 3875 4025 break; 3876 4026 } 4027 4028 ///////////////////////// 4029 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 4030 { 4031 if(p_vci_ixr.cmdack.read()) 4032 { 4033 if(r_ixr_cmd_word.read() == (m_words - 2)) 4034 { 4035 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 4036 r_config_to_ixr_cmd_req = false; 4037 } 4038 else 4039 { 4040 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4041 } 4042 4043 #if DEBUG_MEMC_IXR_CMD 4044 if(m_debug) 4045 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 4046 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << 
std::endl; 4047 #endif 4048 } 4049 break; 4050 } 3877 4051 } // end switch r_ixr_cmd_fsm 3878 4052 … … 3881 4055 //////////////////////////////////////////////////////////////////////////// 3882 4056 // The IXR_RSP FSM receives the response packets from the XRAM, 3883 // for both put transaction, and gettransaction.4057 // for both PUT transaction, and GET transaction. 3884 4058 // 3885 // - A response to a putrequest is a single-cell VCI packet.3886 // The T ransaction Tabindex is contained in the RTRDID field.4059 // - A response to a PUT request is a single-cell VCI packet. 4060 // The TRT index is contained in the RTRDID field. 3887 4061 // The FSM takes the lock protecting the TRT, and the corresponding 3888 // entry is erased. 4062 // entry is erased. If an acknowledge was required (in case of software SYNC) 4063 // the r_config_rsp_lines counter is decremented. 3889 4064 // 3890 // - A response to a getrequest is a multi-cell VCI packet.3891 // The T ransaction Tabindex is contained in the RTRDID field.4065 // - A response to a GET request is a multi-cell VCI packet. 4066 // The TRT index is contained in the RTRDID field. 3892 4067 // The N cells contain the N words of the cache line in the RDATA field. 3893 4068 // The FSM takes the lock protecting the TRT to store the line in the TRT 3894 4069 // (taking into account the write requests already stored in the TRT). 3895 // When the line is completely written, the corresponding rok signal is set. 4070 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 4071 // signal is set to inform the XRAM_RSP FSM. 3896 4072 /////////////////////////////////////////////////////////////////////////////// 4073 4074 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 3897 4075 3898 4076 switch(r_ixr_rsp_fsm.read()) 3899 4077 { 3900 ////////////////// 3901 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3902 { 3903 if(p_vci_ixr.rspval.read()) 3904 { 3905 r_ixr_rsp_cpt = 0; 3906 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3907 if(p_vci_ixr.reop.read() and !(p_vci_ixr.rerror.read() &0x1)) // PUT transaction 3908 { 3909 r_ixr_rsp_fsm = IXR_RSP_ACK; 4078 ////////////////// 4079 case IXR_RSP_IDLE: // test transaction type: PUT/GET 4080 { 4081 if(p_vci_ixr.rspval.read()) 4082 { 4083 r_ixr_rsp_cpt = 0; 4084 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 4085 4086 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 4087 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 4088 4089 if(p_vci_ixr.reop.read()) // PUT 4090 { 4091 #if ODCCP_NON_INCLUSIVE 4092 if (p_vci_ixr.rtrdid.read() == m_trt_lines) 4093 r_ixr_rsp_fsm = IXR_RSP_ACK; 4094 else 4095 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4096 #else 4097 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4098 #endif 3910 4099 3911 4100 #if DEBUG_MEMC_IXR_RSP … … 3914 4103 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 3915 4104 #endif 3916 }3917 else // GET transaction3918 {3919 r_ixr_rsp_fsm = IXR_RSP_TRT_READ;4105 } 4106 else // GET 4107 { 4108 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 3920 4109 3921 4110 #if DEBUG_MEMC_IXR_RSP … … 3924 4113 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 3925 4114 #endif 3926 } 3927 } 3928 break; 3929 } 3930 ///////////////// 3931 case IXR_RSP_ACK: // Aknowledge the VCI response for a PUT 3932 { 3933 #if ODCCP_NON_INCLUSIVE 3934 if(p_vci_ixr.rspval.read()) 3935 { 3936 if (r_ixr_rsp_trt_index.read() == m_trt_lines) 3937 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3938 else 3939 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3940 } 3941 #else 
3942 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3943 #endif 3944 3945 #if DEBUG_MEMC_IXR_RSP 3946 if(m_debug) 3947 std::cout << " <MEMC " << name() << " IXR_RSP_ACK>" << std::endl; 3948 #endif 3949 break; 3950 } 3951 //////////////////////// 3952 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3953 { 3954 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3955 { 3956 m_trt.erase(r_ixr_rsp_trt_index.read()); 4115 } 4116 } 4117 break; 4118 } 4119 //////////////////////// 4120 case IXR_RSP_ACK: // Acknowledge PUT transaction 4121 { 3957 4122 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4123 break; 4124 } 4125 //////////////////////// 4126 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 4127 // decrease the line counter if config request 4128 { 4129 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4130 { 4131 size_t index = r_ixr_rsp_trt_index.read(); 4132 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4133 m_trt.erase(index); 4134 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3958 4135 3959 4136 #if DEBUG_MEMC_IXR_RSP … … 3962 4139 << r_ixr_rsp_trt_index.read() << std::endl; 3963 4140 #endif 3964 m_cpt_ixr_fsm_n_trt_lock++; 3965 } 3966 3967 m_cpt_ixr_fsm_trt_lock++; 3968 3969 break; 3970 } 3971 ////////////////////// 3972 case IXR_RSP_TRT_READ: // write a 64 bits data in the TRT 3973 { 3974 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 3975 { 3976 size_t index = r_ixr_rsp_trt_index.read(); 3977 bool eop = p_vci_ixr.reop.read(); 3978 wide_data_t data = p_vci_ixr.rdata.read(); 3979 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 3980 3981 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-2))) or p_vci_ixr.rerror.read()) 3982 and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); 3983 3984 m_trt.write_rsp( index, 3985 r_ixr_rsp_cpt.read(), 3986 data, 3987 error); 3988 3989 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 2; 3990 3991 if(eop) 3992 { 3993 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 3994 /*if(p_vci_ixr.rpktid.read()&0xF == 0x9) 3995 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = true; 3996 else 3997 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = false;*/ 3998 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3999 } 4141 } 4142 break; 4143 } 4144 ////////////////////// 4145 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 4146 { 4147 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4148 { 4149 size_t index = r_ixr_rsp_trt_index.read(); 4150 size_t word = r_ixr_rsp_cpt.read(); 4151 bool eop = p_vci_ixr.reop.read(); 4152 wide_data_t data = p_vci_ixr.rdata.read(); 4153 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4154 4155 assert(((eop == (word == (m_words-2))) or error) and 4156 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 4157 4158 m_trt.write_rsp( index, 4159 word, 4160 data ); 4161 4162 r_ixr_rsp_cpt = word + 2; 4163 4164 if(eop) 4165 { 4166 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 4167 /*if(p_vci_ixr.rpktid.read()&0xF == 0x9) 4168 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = true; 4169 else 4170 r_ixr_rsp_to_xram_rsp_no_coherent[r_ixr_rsp_trt_index.read()] = false;*/ 4171 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4172 } 4000 4173 4001 4174 #if DEBUG_MEMC_IXR_RSP 4002 4175 if(m_debug) 4003 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing a wordin TRT : "4176 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 4004 4177 << " index = " << std::dec << 
index 4005 << " / word = " << r_ixr_rsp_cpt.read()4178 << " / word = " << word 4006 4179 << " / data = " << std::hex << data << std::endl; 4007 4180 #endif 4008 m_cpt_ixr_fsm_n_trt_lock++; 4009 } 4010 m_cpt_ixr_fsm_trt_lock++; 4011 break; 4012 } 4181 } 4182 break; 4183 } 4013 4184 } // end switch r_ixr_rsp_fsm 4014 4185 … … 4016 4187 // XRAM_RSP FSM 4017 4188 //////////////////////////////////////////////////////////////////////////// 4018 // The XRAM_RSP FSM handles the incoming cache lines from the XRAM. 4189 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 4019 4190 // The cache line has been written in the TRT by the IXR_CMD_FSM. 4020 4191 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 4021 // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] 4022 // as the number of entries in the TRT, that are handled with 4023 // a round-robin priority... 4192 // there are as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 4193 // of entries in the TRT, that are handled with a round-robin priority... 4024 4194 // 4025 // When a response is available, the corresponding TRT entry 4026 // is copied in a local buffer to be written in the cache. 4027 // The FSM takes the lock protecting the TRT, and the lock protecting the DIR. 4028 // It selects a cache slot and writes the line in the cache. 4195 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 4196 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 4197 // It selects a cache slot and saves the victim line in another local buffer 4198 // r_xram_rsp_victim_***. 4199 // It writes the line extracted from TRT in the cache. 4029 4200 // If it was a read MISS, the XRAM_RSP FSM sends a request to the TGT_RSP 4030 4201 // FSM to return the cache line to the registered processor. 
… … 4036 4207 /////////////////////////////////////////////////////////////////////////////// 4037 4208 4209 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 4210 4038 4211 switch(r_xram_rsp_fsm.read()) 4039 4212 { … … 4041 4214 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 4042 4215 { 4043 size_t ptr = r_xram_rsp_trt_index.read(); 4044 size_t lines = m_trt_lines; 4045 4046 for(size_t i=0 ; i<lines ; i++) 4047 { 4048 size_t index = (i+ptr+1) %lines; 4049 if(r_ixr_rsp_to_xram_rsp_rok[index]) 4050 { 4051 r_xram_rsp_trt_index = index; 4052 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4053 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4216 size_t old = r_xram_rsp_trt_index.read(); 4217 size_t lines = m_trt_lines; 4218 for(size_t i=0 ; i<lines ; i++) 4219 { 4220 size_t index = (i+old+1) %lines; 4221 if(r_ixr_rsp_to_xram_rsp_rok[index]) 4222 { 4223 r_xram_rsp_trt_index = index; 4224 r_ixr_rsp_to_xram_rsp_rok[index] = false; 4225 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4054 4226 4055 4227 #if DEBUG_MEMC_XRAM_RSP … … 4059 4231 << " index = " << std::dec << index << std::endl; 4060 4232 #endif 4061 break;4062 }4063 }4064 break;4233 break; 4234 } 4235 } 4236 break; 4065 4237 } 4066 4238 /////////////////////// … … 4068 4240 // Copy the TRT entry in a local buffer 4069 4241 { 4070 if((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4071 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 4072 { 4073 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4074 size_t index = r_xram_rsp_trt_index.read(); 4075 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 4076 4077 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4242 if( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4243 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 4244 { 4245 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 4246 size_t index = r_xram_rsp_trt_index.read(); 4247 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 4248 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 4078 4249 4079 4250 #if DEBUG_MEMC_XRAM_RSP … … 4082 4253 << " Get access to DIR and TRT" << std::endl; 4083 4254 #endif 4084 m_cpt_xram_rsp_fsm_n_dir_lock++; 4085 m_cpt_xram_rsp_fsm_n_trt_lock++; 4086 } 4087 m_cpt_xram_rsp_fsm_dir_lock++; 4088 m_cpt_xram_rsp_fsm_trt_lock++; 4089 break; 4255 } 4256 break; 4090 4257 } 4091 4258 /////////////////////// … … 4093 4260 // and copy it in a local buffer 4094 4261 { 4095 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4096 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) ) 4097 { 4262 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4263 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 4264 4265 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4266 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 4267 4098 4268 // selects & extracts a victim line from cache 4099 4269 size_t way = 0; … … 4108 4278 #endif 4109 4279 4110 // copy the victim line in a local buffer 4280 // copy the victim line in a local buffer (both data dir) 4111 4281 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 4112 4282 4113 4283 r_xram_rsp_victim_copy = victim.owner.srcid; 4114 4115 #if L1_MULTI_CACHE4116 r_xram_rsp_victim_copy_cache= victim.owner.cache_id;4117 #endif4118 4284 4119 4285 r_xram_rsp_victim_coherent = victim.coherent; … … 4128 4294 r_xram_rsp_victim_dirty = victim.dirty; 4129 4295 4130 4131 if(!r_xram_rsp_trt_buf.rerror) 4132 { 4133 #if ODCCP_NON_INCLUSIVE 4134 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 4135 #else 4136 /*ODCCP*/ //if victim is no coherent and there is an 
inval no coherent pending we wait 4137 /*if(!victim.coherent and r_xram_rsp_to_ixr_cmd_inval_ncc_pending.read()) 4138 { 4139 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4140 } 4141 else 4142 {*/ 4143 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 4144 //} 4145 #endif 4146 } 4147 else 4148 { 4149 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4150 } 4296 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 4297 else r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 4151 4298 4152 4299 #if DEBUG_MEMC_XRAM_RSP 4153 4300 if(m_debug) 4154 4301 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 4155 << " Select a slot: "4302 << " Select a victim slot: " 4156 4303 << " way = " << std::dec << way 4157 4304 << " / set = " << set … … 4160 4307 << " / inval_required = " << inval << std::endl; 4161 4308 #endif 4162 }4163 else4164 {4165 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_TRT_COPY"4166 << " bad TRT or DIR allocation" << std::endl;4167 exit(0);4168 }4169 break;4170 } 4171 /////////////////////////4172 case XRAM_RSP_INVAL_LOCK: // Take the IVT lock to check a possible pending inval4173 { 4174 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP)4175 {4176 size_t index = 0;4177 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval4178 {4179 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;4309 break; 4310 } 4311 /////////////////////// 4312 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 4313 // to check a possible pending inval 4314 { 4315 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4316 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 4317 4318 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4319 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 4320 4321 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 4322 { 4323 size_t index = 0; 4324 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 4325 { 4326 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4180 4327 4181 4328 #if DEBUG_MEMC_XRAM_RSP 4182 4329 if(m_debug) 4183 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"4330 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4184 4331 << " Get acces to IVT, but line invalidation registered" 4185 << " / nline = " << std::hex << r_xram_rsp_trt_buf.nline4332 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 4186 4333 << " / index = " << std::dec << index << std::endl; 4187 4334 #endif 4188 4335 4189 }4190 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full4191 {4192 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;4336 } 4337 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 4338 { 4339 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 4193 4340 4194 4341 #if DEBUG_MEMC_XRAM_RSP 4195 4342 if(m_debug) 4196 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"4343 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4197 4344 << " Get acces to IVT, but inval required and IVT full" << std::endl; 4198 4345 #endif 4199 }4200 else4201 {4202 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT;4346 } 4347 else 4348 { 4349 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 4203 4350 4204 4351 #if DEBUG_MEMC_XRAM_RSP 4205 4352 if(m_debug) 4206 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_LOCK>" 4207 << " Get acces to IVT" << std::endl; 4208 #endif 4209 } 4210 m_cpt_xram_rsp_fsm_n_upt_lock++; 4211 } 4212 4213 m_cpt_xram_rsp_fsm_upt_lock++; 4214 4215 break; 4353 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 4354 << " Get acces to IVT / no pending inval request" << std::endl; 4355 
#endif 4356 } 4357 } 4358 break; 4216 4359 } 4217 4360 ///////////////////////// … … 4224 4367 << " Release all locks and retry" << std::endl; 4225 4368 #endif 4226 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK;4227 break;4369 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 4370 break; 4228 4371 } 4229 4372 /////////////////////// 4230 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 4231 // and possibly set an inval request in IVT 4232 { 4233 // check if this is an instruction read, this means pktid is either 4234 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4235 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4236 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4237 4238 // check if this is a cached read, this means pktid is either 4239 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4240 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4241 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4242 4243 bool dirty = false; 4244 4245 // update cache data 4246 size_t set = r_xram_rsp_victim_set.read(); 4247 size_t way = r_xram_rsp_victim_way.read(); 4248 for(size_t word=0; word<m_words ; word++) 4249 { 4250 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4251 4252 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4253 4254 if(m_monitor_ok) 4255 { 4256 addr_t address = r_xram_rsp_trt_buf.nline<<6 | word<<2; 4257 check_monitor( address, r_xram_rsp_trt_buf.wdata[word], false); 4258 } 4259 } 4373 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4374 // erases the TRT entry if victim not dirty, 4375 // and set inval request in IVT if required 4376 { 4377 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4378 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4379 4380 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4381 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4382 4383 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4384 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4385 4386 // check if this is an instruction read, this means pktid is either 4387 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4388 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4389 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4390 4391 // check if this is a cached read, this means pktid is either 4392 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4393 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4394 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 4395 4396 bool dirty = false; 4397 4398 // update cache data 4399 size_t set = r_xram_rsp_victim_set.read(); 4400 size_t way = r_xram_rsp_victim_way.read(); 4401 4402 for(size_t word=0; word<m_words ; word++) 4403 { 4404 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4405 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4406 } 4260 4407 4261 4408 // update cache directory … … 4297 4444 m_cache_directory.write(set, way, entry); 4298 4445 4299 // request an invalidation request in IVT for victim line4300 if(r_xram_rsp_victim_inval.read())4301 {4302 bool broadcast = r_xram_rsp_victim_is_cnt.read();4303 size_t index = 0;4304 size_t count_copies = r_xram_rsp_victim_count.read();4446 // register invalid request in IVT for victim line if required 4447 if(r_xram_rsp_victim_inval.read()) 4448 { 4449 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4450 size_t index = 0; 4451 size_t 
count_copies = r_xram_rsp_victim_count.read(); 4305 4452 4306 bool wok = m_ivt.set(false, // it's an inval transaction 4307 broadcast, // set broadcast bit 4308 false, // no response required 4309 false, // no acknowledge required 4310 0, // srcid 4311 0, // trdid 4312 0, // pktid 4313 r_xram_rsp_victim_nline.read(), 4314 count_copies, 4315 index); 4316 4317 r_xram_rsp_ivt_index = index; 4318 4319 if(!wok) 4320 { 4321 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_DIR_UPDT" 4322 << " invalidate_tab entry free but write unsuccessful" << std::endl; 4323 exit(0); 4324 } 4325 } 4453 bool wok = m_ivt.set(false, // it's an inval transaction 4454 broadcast, // set broadcast bit 4455 false, // no response required 4456 false, // no acknowledge required 4457 0, // srcid 4458 0, // trdid 4459 0, // pktid 4460 r_xram_rsp_victim_nline.read(), 4461 count_copies, 4462 index); 4463 4464 r_xram_rsp_ivt_index = index; 4465 4466 assert( wok and 4467 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4468 } 4326 4469 4327 4470 #if DEBUG_MEMC_XRAM_RSP … … 4337 4480 << " / is_cnt = " << entry.is_cnt << std::endl; 4338 4481 if(r_xram_rsp_victim_inval.read()) 4339 std::cout << " Invalidation request for victim line"4340 << std::hex << r_xram_rsp_victim_nline.read() 4482 std::cout << " Invalidation request for address " 4483 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 4341 4484 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 4342 4485 } … … 4367 4510 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 4368 4511 { 4369 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4370 { 4371 std::vector<data_t> data_vector; 4372 data_vector.clear(); 4373 4374 for(size_t i=0; i<m_words; i++) 4375 { 4376 data_vector.push_back(r_xram_rsp_victim_data[i]); 4377 } 4378 /*m_trt.set(r_xram_rsp_trt_index.read(), 4379 false, // write to XRAM 4380 r_xram_rsp_victim_nline.read(), // line index 4381 0, 4382 0, 4383 0, 4384 false, 4385 0, 4386 0, 4387 std::vector<be_t> (m_words,0), 4388 std::vector<data_t> (m_words,0));*/ 4389 4390 m_trt.set(r_xram_rsp_trt_index.read(), 4391 false, // write to XRAM 4392 r_xram_rsp_victim_nline.read(), // line index 4393 0, 4394 0, 4395 0, 4396 false, 4397 0, 4398 0, 4399 std::vector<be_t> (m_words,0), 4400 data_vector); 4512 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4513 { 4514 std::vector<data_t> data_vector; 4515 data_vector.clear(); 4516 for(size_t i=0; i<m_words; i++) 4517 { 4518 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4519 } 4520 m_trt.set( r_xram_rsp_trt_index.read(), 4521 false, // PUT 4522 r_xram_rsp_victim_nline.read(), // line index 4523 0, // unused 4524 0, // unused 4525 0, // unused 4526 false, // not proc_read 4527 0, // unused 4528 0, // unused 4529 std::vector<be_t>(m_words,0xF), 4530 data_vector); 4401 4531 4402 4532 #if DEBUG_MEMC_XRAM_RSP … … 4404 4534 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 4405 4535 << " Set TRT entry for the put transaction" 4406 << " / dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 4407 #endif 4408 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4409 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4410 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4411 m_cpt_xram_rsp_fsm_n_trt_lock++; 4412 } 4413 4414 m_cpt_xram_rsp_fsm_trt_lock++; 4415 4416 break; 4536 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4537 #endif 4538 
if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4539 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4540 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4541 } 4542 break; 4417 4543 } 4418 4544 ////////////////////// 4419 4545 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4420 4546 { 4421 if(!r_xram_rsp_to_tgt_rsp_req.read())4422 {4423 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid;4424 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid;4425 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid;4426 for(size_t i=0; i < m_words; i++)4427 {4428 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i];4429 }4430 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index;4431 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length;4432 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key;4433 r_xram_rsp_to_tgt_rsp_rerror = false;4434 r_xram_rsp_to_tgt_rsp_req = true;4547 if ( not r_xram_rsp_to_tgt_rsp_req.read() ) 4548 { 4549 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4550 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4551 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4552 for(size_t i=0; i < m_words; i++) 4553 { 4554 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4555 } 4556 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4557 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4558 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4559 r_xram_rsp_to_tgt_rsp_rerror = false; 4560 r_xram_rsp_to_tgt_rsp_req = true; 4435 4561 4436 4562 #if ODCCP_NON_INCLUSIVE … … 4454 4580 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4455 4581 #endif 4456 }4457 break;4582 } 4583 break; 4458 4584 } 4459 4585 //////////////////// … … 4473 4599 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4474 4600 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 4475 #if L1_MULTI_CACHE4476 xram_rsp_to_cc_send_fifo_cache_id = r_xram_rsp_victim_copy_cache.read();4477 #endif4478 4601 xram_rsp_to_cc_send_fifo_put = multi_req; 4479 r_xram_rsp_next_ptr 4602 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4480 4603 4481 4604 #if ODCCP_NON_INCLUSIVE … … 4494 4617 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4495 4618 << " Send an inval request to CC_SEND FSM" 4496 << " / victim line = " << r_xram_rsp_victim_nline.read()<< std::endl;4619 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4497 4620 #endif 4498 4621 } … … 4506 4629 4507 4630 r_xram_rsp_to_ixr_cmd_req = true; 4508 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 4509 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 4510 for(size_t i=0; i<m_words ; i++) 4511 { 4512 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 4513 } 4514 m_cpt_write_dirty++; 4515 4631 //r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 4632 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4633 /*for(size_t i=0; i<m_words ; i++) 4634 { 4635 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 4636 }*/ 4516 4637 #if (ODCCP_NON_INCLUSIVE == 0) 4517 4638 // if victim is no coherent, we dont request a ixr command … … 4525 4646 #endif 4526 4647 4527 bool multi_req = !r_xram_rsp_victim_is_cnt.read() and r_xram_rsp_victim_inval.read(); 4648 m_cpt_write_dirty++; 4649 4650 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4651 r_xram_rsp_victim_inval.read(); 4528 4652 bool 
not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4529 4653 … … 4535 4659 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4536 4660 << " Send the put request to IXR_CMD FSM" 4537 << " / victim line = " << r_xram_rsp_victim_nline.read()<< std::endl;4538 #endif 4539 }4540 break;4661 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4662 #endif 4663 } 4664 break; 4541 4665 } 4542 4666 ///////////////////////// 4543 4667 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4544 4668 { 4545 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4546 { 4547 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4548 m_cpt_xram_rsp_fsm_n_heap_lock++; 4549 } 4669 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4670 { 4671 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4672 } 4550 4673 4551 4674 #if DEBUG_MEMC_XRAM_RSP … … 4554 4677 << " Requesting HEAP lock" << std::endl; 4555 4678 #endif 4556 4557 m_cpt_xram_rsp_fsm_heap_lock++; 4558 4559 break; 4679 break; 4560 4680 } 4561 4681 ///////////////////////// … … 4567 4687 4568 4688 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4569 #if L1_MULTI_CACHE4570 xram_rsp_to_cc_send_fifo_cache_id = entry.owner.cache_id;4571 #endif4572 4689 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4573 4690 xram_rsp_to_cc_send_fifo_put = true; … … 4613 4730 HeapEntry last_entry; 4614 4731 last_entry.owner.srcid = 0; 4615 #if L1_MULTI_CACHE4616 last_entry.owner.cache_id = 0;4617 #endif4618 4732 last_entry.owner.inst = false; 4619 4733 if(m_heap.is_full()) … … 4639 4753 break;
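When the evicted victim line has several registered copies, the XRAM_RSP_HEAP_REQ and XRAM_RSP_HEAP_ERASE states above walk the heap-allocated linked list of sharers and push one invalidation per owner into the CC_SEND fifo. A rough standalone sketch of that traversal, with invented structure names and assuming the usual convention that the tail entry's next pointer designates itself (an illustration only, not part of the changeset), could look like this:

#include <cstddef>
#include <cstdint>
#include <vector>

struct OwnerInfo  { std::uint32_t srcid; bool inst; };     // one registered copy
struct SharerNode { OwnerInfo owner; std::size_t next; };  // heap entry: owner + link

// Collect every copy that must receive an invalidation, starting from the
// pointer stored in the victim's directory entry (r_xram_rsp_victim_ptr above).
std::vector<OwnerInfo> collect_copies(const std::vector<SharerNode>& heap, std::size_t head)
{
    std::vector<OwnerInfo> copies;
    std::size_t ptr = head;
    while (true)
    {
        copies.push_back(heap[ptr].owner);
        if (heap[ptr].next == ptr) break;  // assumed tail convention: self-pointing link
        ptr = heap[ptr].next;
    }
    return copies;
}

In the hardware model the list is consumed one entry per cycle and the freed entries are returned to the heap free list, but the traversal order is the same as in this sketch.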