Changeset 823 for branches/RWT
- Timestamp: Sep 30, 2014, 3:32:13 PM (10 years ago)
- Location: branches/RWT
- Files: 6 edited
branches/RWT/lib/generic_cache_tsar/include/generic_cache.h
r814 → r823: style and formatting cleanup only; the changes are cosmetic.
- Whitespace is normalized in the index arithmetic of the inline accessors, which now read "return r_data[(way * m_sets * m_words) + (set * m_words) + word];", "return r_tag[(way * m_sets) + set];", "return r_lru[(way * m_sets) + set];" and "return r_state[(way * m_sets) + set];".
- The member declarations (data_t *r_data; addr_t *r_tag; int *r_state; bool *r_lru; size_t m_ways; size_t m_sets; size_t m_words;), the constructor parameter list, the m_x / m_y / m_z initializers, the new data_t[nways * nsets * nwords] allocations and the std::memset() calls in reset() are re-aligned with consistent spacing.
- The cache-hit tests in the read() variants and the other lookup helpers drop && in favour of and (matching the or already used) and are split across lines:
      if ((tag == cache_tag(way, set)) and
          ((cache_state(way, set) == CACHE_SLOT_STATE_VALID_CC) or
           (cache_state(way, set) == CACHE_SLOT_STATE_VALID_NCC)))
- for headers in the victim-selection loops, fileTrace() and printTrace() get uniform spacing (for (size_t _way = 0; _way < m_ways && !found; _way++)), assert() argument lists are re-indented, and the single-line "if (...) cache_set_lru(way, set);" statements in write_dir() are split onto two lines.
branches/RWT/modules/vci_mem_cache/caba/source/include/mem_cache_directory.h
r814 → r823: indentation and style cleanup only; the changes are cosmetic.
- The bodies of LruEntry, Owner, DirectoryEntry, CacheDirectory, HeapEntry, HeapDirectory and CacheData are re-indented to a uniform level, and member assignments in the constructors, init() and copy() methods get consistent spacing (recent = false; cache_coherent = false; and so on).
- for loops get canonical spacing and, where they held a single statement, explicit braces; the CacheDirectory constructor now reads "for (size_t i = 0; i < sets; i++) { m_dir_tab[i] = new DirectoryEntry[ways]; for (size_t j = 0; j < ways; j++) { m_dir_tab[i][j].init(); } }", and the destructor, init(), read(), read_neutral() and the HeapDirectory / CacheData methods are reformatted in the same way.
- In select(), the eviction searches use and instead of && while keeping the same priority order: an invalid way first, then a not-recent unlocked way, a not-recent locked way, a recent unlocked way, and finally way 0.
- assert() conditions are re-spaced (set < m_sets, way < m_ways, ptr < tab_size), and the single-statement if bodies in CacheDirectory::write(), read_line() and print_list() gain braces.
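The LRU handling reformatted above is a pseudo-LRU scheme: CacheDirectory::write() marks the written way as recent, except that when every other way of the set is already recent it clears all recent bits so the set can age again. The sketch below restates that rule with standalone, illustrative names (the real code operates on m_lru_tab[set][i].recent).

    #include <cstddef>
    #include <vector>

    // Pseudo-LRU bookkeeping as performed by CacheDirectory::write():
    // 'recent' holds one bit per way of a single set; names are illustrative.
    void update_lru_bits(std::vector<bool> &recent, std::size_t written_way)
    {
        bool all_recent = true;
        for (std::size_t i = 0; i < recent.size(); i++) {
            if (i != written_way) {
                all_recent = all_recent && recent[i];
            }
        }
        if (all_recent) {
            // every other way is already recent: reset the whole set
            for (std::size_t i = 0; i < recent.size(); i++) {
                recent[i] = false;
            }
        } else {
            recent[written_way] = true;
        }
    }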
branches/RWT/modules/vci_mem_cache/caba/source/include/update_tab.h
r814 → r823: the same re-indentation and spacing cleanup is applied to UpdateTabEntry and UpdateTab; the changes are cosmetic.
- Field declarations, the constructors, init(), copy() and print() of UpdateTabEntry are re-aligned (valid = source.valid; update = source.update; and so on), and the print() stream insertions get consistent spacing.
- In UpdateTab, assignments such as "size_tab = size_tab_i;" are spaced, and the loop in init() gains braces: "for (size_t i = 0; i < size_tab; i++) { tab[i].init(); }". The size(), print(), read() and set() methods are re-indented in the same style.
198 ///////////////////////////////////////////////////////////////////// 199 UpdateTabEntry read(size_t entry) 200 { 201 assert(entry < size_tab && "Bad Update Tab Entry"); 202 return UpdateTabEntry(tab[entry]); 203 } 204 205 /////////////////////////////////////////////////////////////////////////// 206 // The set() function writes an entry in the Update Table 207 // Arguments : 208 // - update : transaction type (bool) 209 // - srcid : srcid of the initiator 210 // - trdid : trdid of the initiator 211 // - pktid : pktid of the initiator 212 // - count : number of expected responses 213 // - index : (return argument) index of the selected entry 214 // This function returns true if the write successed (an entry was empty). 215 /////////////////////////////////////////////////////////////////////////// 216 bool set(const bool update, 217 const bool brdcast, 218 const bool rsp, 219 const bool ack, 220 const size_t srcid, 221 const size_t trdid, 222 const size_t pktid, 223 const addr_t nline, 224 const size_t count, 225 size_t &index) 226 { 227 for (size_t i = 0; i < size_tab; i++) 228 { 229 if (!tab[i].valid) 230 { 231 tab[i].valid = true; 232 tab[i].update = update; 233 tab[i].brdcast = brdcast; 234 tab[i].rsp = rsp; 235 tab[i].ack = ack; 236 tab[i].srcid = (size_t) srcid; 237 tab[i].trdid = (size_t) trdid; 238 tab[i].pktid = (size_t) pktid; 239 tab[i].nline = (addr_t) nline; 240 tab[i].count = (size_t) count; 241 index = i; 242 return true; 243 } 244 } 245 return false; 246 } // end set() 247 248 ///////////////////////////////////////////////////////////////////// 249 // The decrement() function decrements the counter for a given entry. 250 // Arguments : 251 // - index : the index of the entry 252 // - counter : (return argument) value of the counter after decrement 253 // This function returns true if the entry is valid. 254 ///////////////////////////////////////////////////////////////////// 255 bool decrement(const size_t index, 256 size_t &counter) 257 { 258 assert((index < size_tab) && "Bad Update Tab Entry"); 259 if (tab[index].valid) 260 { 261 tab[index].count--; 262 counter = tab[index].count; 263 return true; 264 } 265 else 266 { 267 return false; 268 } 269 } 270 271 ///////////////////////////////////////////////////////////////////// 272 // The is_full() function returns true if the table is full 273 ///////////////////////////////////////////////////////////////////// 274 bool is_full() 275 { 276 for (size_t i = 0; i < size_tab; i++) 277 { 278 if (!tab[i].valid) 279 { 280 return false; 281 } 282 } 239 283 return true; 240 } 241 } 242 return false; 243 } // end set() 244 245 ///////////////////////////////////////////////////////////////////// 246 // The decrement() function decrements the counter for a given entry. 247 // Arguments : 248 // - index : the index of the entry 249 // - counter : (return argument) value of the counter after decrement 250 // This function returns true if the entry is valid. 
251 ///////////////////////////////////////////////////////////////////// 252 bool decrement( const size_t index, 253 size_t &counter ) 254 { 255 assert((index<size_tab) && "Bad Update Tab Entry"); 256 if ( tab[index].valid ) 257 { 258 tab[index].count--; 259 counter = tab[index].count; 260 return true; 261 } 262 else 263 { 264 return false; 265 } 266 } 267 268 ///////////////////////////////////////////////////////////////////// 269 // The is_full() function returns true if the table is full 270 ///////////////////////////////////////////////////////////////////// 271 bool is_full() 272 { 273 for(size_t i = 0 ; i < size_tab ; i++) 274 { 275 if(!tab[i].valid) return false; 276 } 277 return true; 278 } 279 280 ///////////////////////////////////////////////////////////////////// 281 // The is_not_empty() function returns true if the table is not empty 282 ///////////////////////////////////////////////////////////////////// 283 bool is_not_empty() 284 { 285 for(size_t i = 0 ; i < size_tab ; i++) 286 { 287 if(tab[i].valid) return true; 288 } 289 return false; 290 } 291 292 ///////////////////////////////////////////////////////////////////// 293 // The need_rsp() function returns the need of a response 294 // Arguments : 295 // - index : the index of the entry 296 ///////////////////////////////////////////////////////////////////// 297 bool need_rsp(const size_t index) 298 { 299 assert(index<size_tab && "Bad Update Tab Entry"); 300 return tab[index].rsp; 301 } 302 303 ///////////////////////////////////////////////////////////////////// 304 // The need_ack() function returns the need of an acknowledge 305 // Arguments : 306 // - index : the index of the entry 307 ///////////////////////////////////////////////////////////////////// 308 bool need_ack(const size_t index) 309 { 310 assert(index<size_tab && "Bad Update Tab Entry"); 311 return tab[index].ack; 312 } 313 314 ///////////////////////////////////////////////////////////////////// 315 // The is_brdcast() function returns the transaction type 316 // Arguments : 317 // - index : the index of the entry 318 ///////////////////////////////////////////////////////////////////// 319 bool is_brdcast(const size_t index) 320 { 321 assert(index<size_tab && "Bad Update Tab Entry"); 322 return tab[index].brdcast; 323 } 324 325 ///////////////////////////////////////////////////////////////////// 326 // The is_update() function returns the transaction type 327 // Arguments : 328 // - index : the index of the entry 329 ///////////////////////////////////////////////////////////////////// 330 bool is_update(const size_t index) 331 { 332 assert(index<size_tab && "Bad Update Tab Entry"); 333 return tab[index].update; 334 } 335 336 ///////////////////////////////////////////////////////////////////// 337 // The srcid() function returns the srcid value 338 // Arguments : 339 // - index : the index of the entry 340 ///////////////////////////////////////////////////////////////////// 341 size_t srcid(const size_t index) 342 { 343 assert(index<size_tab && "Bad Update Tab Entry"); 344 return tab[index].srcid; 345 } 346 347 ///////////////////////////////////////////////////////////////////// 348 // The trdid() function returns the trdid value 349 // Arguments : 350 // - index : the index of the entry 351 ///////////////////////////////////////////////////////////////////// 352 size_t trdid(const size_t index) 353 { 354 assert(index<size_tab && "Bad Update Tab Entry"); 355 return tab[index].trdid; 356 } 357 358 
///////////////////////////////////////////////////////////////////// 359 // The pktid() function returns the pktid value 360 // Arguments : 361 // - index : the index of the entry 362 ///////////////////////////////////////////////////////////////////// 363 size_t pktid(const size_t index) 364 { 365 assert(index<size_tab && "Bad Update Tab Entry"); 366 return tab[index].pktid; 367 } 368 369 ///////////////////////////////////////////////////////////////////// 370 // The nline() function returns the nline value 371 // Arguments : 372 // - index : the index of the entry 373 ///////////////////////////////////////////////////////////////////// 374 addr_t nline(const size_t index) 375 { 376 assert(index<size_tab && "Bad Update Tab Entry"); 377 return tab[index].nline; 378 } 379 380 ///////////////////////////////////////////////////////////////////// 381 // The search_inval() function returns the index of the entry in UPT 382 // Arguments : 383 // - nline : the line number of the entry in the directory 384 ///////////////////////////////////////////////////////////////////// 385 bool search_inval(const addr_t nline,size_t &index) 386 { 387 size_t i ; 388 389 for (i = 0 ; i < size_tab ; i++) 390 { 391 if ( (tab[i].nline == nline) and tab[i].valid and not tab[i].update ) 392 { 393 index = i ; 394 return true; 395 } 396 } 397 return false; 398 } 399 400 ///////////////////////////////////////////////////////////////////// 401 // The read_nline() function returns the index of the entry in UPT 402 // Arguments : 403 // - nline : the line number of the entry in the directory 404 ///////////////////////////////////////////////////////////////////// 405 bool read_nline(const addr_t nline,size_t &index) 406 { 407 size_t i ; 408 409 for (i = 0 ; i < size_tab ; i++) 410 { 411 if ( (tab[i].nline == nline) and tab[i].valid ) 412 { 413 index = i ; 414 return true; 415 } 416 } 417 return false; 418 } 419 420 ///////////////////////////////////////////////////////////////////// 421 // The clear() function erases an entry of the tab 422 // Arguments : 423 // - index : the index of the entry 424 ///////////////////////////////////////////////////////////////////// 425 void clear(const size_t index) 426 { 427 assert(index<size_tab && "Bad Update Tab Entry"); 428 tab[index].valid=false; 429 return; 430 } 284 } 285 286 ///////////////////////////////////////////////////////////////////// 287 // The is_not_empty() function returns true if the table is not empty 288 ///////////////////////////////////////////////////////////////////// 289 bool is_not_empty() 290 { 291 for (size_t i = 0; i < size_tab; i++) 292 { 293 if (tab[i].valid) 294 { 295 return true; 296 } 297 } 298 return false; 299 } 300 301 ///////////////////////////////////////////////////////////////////// 302 // The need_rsp() function returns the need of a response 303 // Arguments : 304 // - index : the index of the entry 305 ///////////////////////////////////////////////////////////////////// 306 bool need_rsp(const size_t index) 307 { 308 assert(index < size_tab && "Bad Update Tab Entry"); 309 return tab[index].rsp; 310 } 311 312 ///////////////////////////////////////////////////////////////////// 313 // The need_ack() function returns the need of an acknowledge 314 // Arguments : 315 // - index : the index of the entry 316 ///////////////////////////////////////////////////////////////////// 317 bool need_ack(const size_t index) 318 { 319 assert(index < size_tab && "Bad Update Tab Entry"); 320 return tab[index].ack; 321 } 322 323 
///////////////////////////////////////////////////////////////////// 324 // The is_brdcast() function returns the transaction type 325 // Arguments : 326 // - index : the index of the entry 327 ///////////////////////////////////////////////////////////////////// 328 bool is_brdcast(const size_t index) 329 { 330 assert(index < size_tab && "Bad Update Tab Entry"); 331 return tab[index].brdcast; 332 } 333 334 ///////////////////////////////////////////////////////////////////// 335 // The is_update() function returns the transaction type 336 // Arguments : 337 // - index : the index of the entry 338 ///////////////////////////////////////////////////////////////////// 339 bool is_update(const size_t index) 340 { 341 assert(index < size_tab && "Bad Update Tab Entry"); 342 return tab[index].update; 343 } 344 345 ///////////////////////////////////////////////////////////////////// 346 // The srcid() function returns the srcid value 347 // Arguments : 348 // - index : the index of the entry 349 ///////////////////////////////////////////////////////////////////// 350 size_t srcid(const size_t index) 351 { 352 assert(index < size_tab && "Bad Update Tab Entry"); 353 return tab[index].srcid; 354 } 355 356 ///////////////////////////////////////////////////////////////////// 357 // The trdid() function returns the trdid value 358 // Arguments : 359 // - index : the index of the entry 360 ///////////////////////////////////////////////////////////////////// 361 size_t trdid(const size_t index) 362 { 363 assert(index < size_tab && "Bad Update Tab Entry"); 364 return tab[index].trdid; 365 } 366 367 ///////////////////////////////////////////////////////////////////// 368 // The pktid() function returns the pktid value 369 // Arguments : 370 // - index : the index of the entry 371 ///////////////////////////////////////////////////////////////////// 372 size_t pktid(const size_t index) 373 { 374 assert(index < size_tab && "Bad Update Tab Entry"); 375 return tab[index].pktid; 376 } 377 378 ///////////////////////////////////////////////////////////////////// 379 // The nline() function returns the nline value 380 // Arguments : 381 // - index : the index of the entry 382 ///////////////////////////////////////////////////////////////////// 383 addr_t nline(const size_t index) 384 { 385 assert(index < size_tab && "Bad Update Tab Entry"); 386 return tab[index].nline; 387 } 388 389 ///////////////////////////////////////////////////////////////////// 390 // The search_inval() function returns the index of the entry in UPT 391 // Arguments : 392 // - nline : the line number of the entry in the directory 393 ///////////////////////////////////////////////////////////////////// 394 bool search_inval(const addr_t nline, size_t &index) 395 { 396 size_t i; 397 398 for (i = 0; i < size_tab; i++) 399 { 400 if ((tab[i].nline == nline) and tab[i].valid and not tab[i].update) 401 { 402 index = i; 403 return true; 404 } 405 } 406 return false; 407 } 408 409 ///////////////////////////////////////////////////////////////////// 410 // The read_nline() function returns the index of the entry in UPT 411 // Arguments : 412 // - nline : the line number of the entry in the directory 413 ///////////////////////////////////////////////////////////////////// 414 bool read_nline(const addr_t nline, size_t &index) 415 { 416 size_t i; 417 418 for (i = 0; i < size_tab; i++) 419 { 420 if ((tab[i].nline == nline) and tab[i].valid) 421 { 422 index = i; 423 return true; 424 } 425 } 426 return false; 427 } 428 429 
///////////////////////////////////////////////////////////////////// 430 // The clear() function erases an entry of the tab 431 // Arguments : 432 // - index : the index of the entry 433 ///////////////////////////////////////////////////////////////////// 434 void clear(const size_t index) 435 { 436 assert(index < size_tab && "Bad Update Tab Entry"); 437 tab[index].valid = false; 438 return; 439 } 431 440 432 441 }; -
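Editorial note, not part of the changeset: for readers following the UPT changes above, a minimal usage sketch of the UpdateTab API declared in this header (set / decrement / clear). The UpdateTab instance "upt" and all argument values (srcid, nline, count) are hypothetical, and the sketch assumes the usual SoCLib build environment.

    #include "update_tab.h"

    // Allocate a UPT entry for a multi-update sent to three L1 copies,
    // then release it as the acknowledges come back.
    void example_update_flow(UpdateTab &upt)
    {
        size_t index;
        bool ok = upt.set(true,        // update transaction (not an invalidate)
                          false,       // not a broadcast
                          true,        // a response to the initiator is required
                          false,       // no acknowledge to the CONFIG FSM
                          4, 0, 0,     // srcid / trdid / pktid (hypothetical)
                          0x12345ULL,  // nline (hypothetical)
                          3,           // number of expected acknowledges
                          index);
        if (not ok) return;            // table full: the request must wait

        size_t counter;
        for (size_t i = 0; i < 3; i++) upt.decrement(index, counter);
        if (counter == 0) upt.clear(index);  // last acknowledge received
    }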
branches/RWT/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
r814 r823 69 69 : public soclib::caba::BaseModule 70 70 { 71 typedef typename vci_param_int::fast_addr_t 72 typedef typename sc_dt::sc_uint<64> 73 typedef uint32_t 74 typedef uint32_t 75 typedef uint32_t 76 typedef uint32_t 71 typedef typename vci_param_int::fast_addr_t addr_t; 72 typedef typename sc_dt::sc_uint<64> wide_data_t; 73 typedef uint32_t data_t; 74 typedef uint32_t tag_t; 75 typedef uint32_t be_t; 76 typedef uint32_t copy_t; 77 77 78 78 /* States of the TGT_CMD fsm */ … … 113 113 CC_RECEIVE_IDLE, 114 114 CC_RECEIVE_CLEANUP, 115 CC_RECEIVE_CLEANUP_EOP,116 115 CC_RECEIVE_MULTI_ACK 117 116 }; … … 232 231 { 233 232 IXR_RSP_IDLE, 234 IXR_RSP_ACK,235 233 IXR_RSP_TRT_ERASE, 236 234 IXR_RSP_TRT_READ … … 399 397 // b1 accÚs table llsc type SW / other 400 398 // b2 WRITE/CAS/LL/SC 401 TYPE_READ_DATA_UNC 402 TYPE_READ_DATA_MISS 403 TYPE_READ_INS_UNC 404 TYPE_READ_INS_MISS 405 TYPE_WRITE 406 TYPE_CAS 407 TYPE_LL 408 TYPE_SC 399 TYPE_READ_DATA_UNC = 0x0, 400 TYPE_READ_DATA_MISS = 0x1, 401 TYPE_READ_INS_UNC = 0x2, 402 TYPE_READ_INS_MISS = 0x3, 403 TYPE_WRITE = 0x4, 404 TYPE_CAS = 0x5, 405 TYPE_LL = 0x6, 406 TYPE_SC = 0x7 409 407 }; 410 408 … … 425 423 426 424 // instrumentation counters 427 uint32_t m_cpt_cycles; // Counter of cycles425 uint32_t m_cpt_cycles; // Counter of cycles 428 426 429 427 // Counters accessible in software (not yet but eventually) and tagged 430 uint32_t m_cpt_reset_count; // Last cycle at which counters have been reset431 uint32_t m_cpt_read_local; // Number of local READ transactions432 uint32_t m_cpt_read_remote; // number of remote READ transactions433 uint32_t m_cpt_read_cost; // Number of (flits * distance) for READs434 435 uint32_t m_cpt_write_local; // Number of local WRITE transactions436 uint32_t m_cpt_write_remote; // number of remote WRITE transactions437 uint32_t m_cpt_write_flits_local; // number of flits for local WRITEs438 uint32_t m_cpt_write_flits_remote; // number of flits for remote WRITEs439 uint32_t m_cpt_write_cost; // Number of (flits * distance) for WRITEs440 uint32_t m_cpt_write_ncc_miss; // Number of write on ncc line441 442 uint32_t m_cpt_ll_local; // Number of local LL transactions443 uint32_t m_cpt_ll_remote; // number of remote LL transactions444 uint32_t m_cpt_ll_cost; // Number of (flits * distance) for LLs445 446 uint32_t m_cpt_sc_local; // Number of local SC transactions447 uint32_t m_cpt_sc_remote; // number of remote SC transactions448 uint32_t m_cpt_sc_cost; // Number of (flits * distance) for SCs449 450 uint32_t m_cpt_cas_local; // Number of local SC transactions451 uint32_t m_cpt_cas_remote; // number of remote SC transactions452 uint32_t m_cpt_cas_cost; // Number of (flits * distance) for SCs453 454 uint32_t m_cpt_update; // Number of requests causing an UPDATE455 uint32_t m_cpt_update_local; // Number of local UPDATE transactions456 uint32_t m_cpt_update_remote; // Number of remote UPDATE transactions457 uint32_t m_cpt_update_cost; // Number of (flits * distance) for UPDT458 459 uint32_t m_cpt_minval; // Number of requests causing M_INV460 uint32_t m_cpt_minval_local; // Number of local M_INV transactions461 uint32_t m_cpt_minval_remote; // Number of remote M_INV transactions462 uint32_t m_cpt_minval_cost; // Number of (flits * distance) for M_INV463 464 uint32_t m_cpt_binval; // Number of BROADCAST INVAL465 466 uint32_t m_cpt_cleanup_local; // Number of local CLEANUP transactions (all cleanup types)467 uint32_t m_cpt_cleanup_remote; // Number of remote CLEANUP transactions (all cleanup types)468 uint32_t m_cpt_cleanup_cost; // 
Number of (flits * distance) for CLEANUPs (all types)428 uint32_t m_cpt_reset_count; // Last cycle at which counters have been reset 429 uint32_t m_cpt_read_local; // Number of local READ transactions 430 uint32_t m_cpt_read_remote; // number of remote READ transactions 431 uint32_t m_cpt_read_cost; // Number of (flits * distance) for READs 432 433 uint32_t m_cpt_write_local; // Number of local WRITE transactions 434 uint32_t m_cpt_write_remote; // number of remote WRITE transactions 435 uint32_t m_cpt_write_flits_local; // number of flits for local WRITEs 436 uint32_t m_cpt_write_flits_remote; // number of flits for remote WRITEs 437 uint32_t m_cpt_write_cost; // Number of (flits * distance) for WRITEs 438 uint32_t m_cpt_write_ncc_miss; // Number of write on ncc line 439 440 uint32_t m_cpt_ll_local; // Number of local LL transactions 441 uint32_t m_cpt_ll_remote; // number of remote LL transactions 442 uint32_t m_cpt_ll_cost; // Number of (flits * distance) for LLs 443 444 uint32_t m_cpt_sc_local; // Number of local SC transactions 445 uint32_t m_cpt_sc_remote; // number of remote SC transactions 446 uint32_t m_cpt_sc_cost; // Number of (flits * distance) for SCs 447 448 uint32_t m_cpt_cas_local; // Number of local SC transactions 449 uint32_t m_cpt_cas_remote; // number of remote SC transactions 450 uint32_t m_cpt_cas_cost; // Number of (flits * distance) for SCs 451 452 uint32_t m_cpt_update; // Number of requests causing an UPDATE 453 uint32_t m_cpt_update_local; // Number of local UPDATE transactions 454 uint32_t m_cpt_update_remote; // Number of remote UPDATE transactions 455 uint32_t m_cpt_update_cost; // Number of (flits * distance) for UPDT 456 457 uint32_t m_cpt_minval; // Number of requests causing M_INV 458 uint32_t m_cpt_minval_local; // Number of local M_INV transactions 459 uint32_t m_cpt_minval_remote; // Number of remote M_INV transactions 460 uint32_t m_cpt_minval_cost; // Number of (flits * distance) for M_INV 461 462 uint32_t m_cpt_binval; // Number of BROADCAST INVAL 463 464 uint32_t m_cpt_cleanup_local; // Number of local CLEANUP transactions (all cleanup types) 465 uint32_t m_cpt_cleanup_remote; // Number of remote CLEANUP transactions (all cleanup types) 466 uint32_t m_cpt_cleanup_cost; // Number of (flits * distance) for CLEANUPs (all types) 469 467 470 468 // Counters not accessible by software, but tagged 471 uint32_t m_cpt_read_miss; // Number of MISS READ472 uint32_t m_cpt_write_miss; // Number of MISS WRITE473 uint32_t m_cpt_write_dirty; // Cumulated length for WRITE transactions474 uint32_t m_cpt_write_broadcast; // Number of BROADCAST INVAL because write475 476 uint32_t m_cpt_trt_rb; // Read blocked by a hit in trt477 uint32_t m_cpt_trt_full; // Transaction blocked due to a full trt469 uint32_t m_cpt_read_miss; // Number of MISS READ 470 uint32_t m_cpt_write_miss; // Number of MISS WRITE 471 uint32_t m_cpt_write_dirty; // Cumulated length for WRITE transactions 472 uint32_t m_cpt_write_broadcast; // Number of BROADCAST INVAL because write 473 474 uint32_t m_cpt_trt_rb; // Read blocked by a hit in trt 475 uint32_t m_cpt_trt_full; // Transaction blocked due to a full trt 478 476 479 477 uint32_t m_cpt_heap_unused; // NB cycles HEAP LOCK unused … … 481 479 uint32_t m_cpt_heap_min_slot_available; // NB HEAP : Min of slot available 482 480 483 uint32_t m_cpt_ncc_to_cc_read; // NB change from NCC to CC caused by a READ484 uint32_t m_cpt_ncc_to_cc_write; // NB change from NCC to CC caused by a WRITE485 486 uint32_t m_cpt_cleanup_data_local; // Number of local 
cleanups with data487 uint32_t m_cpt_cleanup_data_remote; // Number of remote cleanups with data488 uint32_t m_cpt_cleanup_data_cost; // Cost for cleanups with data489 490 uint32_t m_cpt_update_flits; // Number of flits for UPDATEs491 uint32_t m_cpt_inval_cost; // Number of (flits * distance) for INVALs481 uint32_t m_cpt_ncc_to_cc_read; // NB change from NCC to CC caused by a READ 482 uint32_t m_cpt_ncc_to_cc_write; // NB change from NCC to CC caused by a WRITE 483 484 uint32_t m_cpt_cleanup_data_local; // Number of local cleanups with data 485 uint32_t m_cpt_cleanup_data_remote; // Number of remote cleanups with data 486 uint32_t m_cpt_cleanup_data_cost; // Cost for cleanups with data 487 488 uint32_t m_cpt_update_flits; // Number of flits for UPDATEs 489 uint32_t m_cpt_inval_cost; // Number of (flits * distance) for INVALs 492 490 493 491 uint32_t m_cpt_get; … … 499 497 uint32_t m_cpt_upt_unused; // NB cycles UPT LOCK unused 500 498 501 // Unused502 uint32_t m_cpt_read_data_unc;503 uint32_t m_cpt_read_data_miss_CC;504 uint32_t m_cpt_read_ins_unc;505 uint32_t m_cpt_read_ins_miss;506 uint32_t m_cpt_read_ll_CC;507 uint32_t m_cpt_read_data_miss_NCC;508 uint32_t m_cpt_read_ll_NCC;509 510 499 size_t m_prev_count; 511 500 … … 515 504 516 505 public: 517 sc_in<bool> 518 sc_in<bool> 519 sc_out<bool> 520 soclib::caba::VciTarget<vci_param_int> 521 soclib::caba::VciInitiator<vci_param_ext> 522 soclib::caba::DspinInput<dspin_in_width> 523 soclib::caba::DspinOutput<dspin_out_width> 524 soclib::caba::DspinOutput<dspin_out_width> 506 sc_in<bool> p_clk; 507 sc_in<bool> p_resetn; 508 sc_out<bool> p_irq; 509 soclib::caba::VciTarget<vci_param_int> p_vci_tgt; 510 soclib::caba::VciInitiator<vci_param_ext> p_vci_ixr; 511 soclib::caba::DspinInput<dspin_in_width> p_dspin_p2m; 512 soclib::caba::DspinOutput<dspin_out_width> p_dspin_m2p; 513 soclib::caba::DspinOutput<dspin_out_width> p_dspin_clack; 525 514 526 515 #if MONITOR_MEMCACHE_FSM == 1 … … 557 546 const size_t nwords, // Number of words per line 558 547 const size_t max_copies, // max number of copies 559 const size_t heap_size =HEAP_ENTRIES,560 const size_t trt_lines =TRT_ENTRIES,561 const size_t upt_lines =UPT_ENTRIES,562 const size_t ivt_lines =IVT_ENTRIES,563 const size_t debug_start_cycle =0,564 const bool debug_ok =false );548 const size_t heap_size = HEAP_ENTRIES, 549 const size_t trt_lines = TRT_ENTRIES, 550 const size_t upt_lines = UPT_ENTRIES, 551 const size_t ivt_lines = IVT_ENTRIES, 552 const size_t debug_start_cycle = 0, 553 const bool debug_ok = false ); 565 554 566 555 ~VciMemCache(); … … 568 557 void reset_counters(); 569 558 void print_stats(bool activity_counters = true, bool stats = false); 570 void print_trace( size_t detailled = 0);559 void print_trace(size_t detailled = 0); 571 560 void cache_monitor(addr_t addr); 572 561 void start_monitor(addr_t addr, addr_t length); … … 581 570 uint32_t min_value(uint32_t old_value, uint32_t new_value); 582 571 bool is_local_req(uint32_t req_srcid); 583 int 572 int read_instrumentation(uint32_t regr, uint32_t & rdata); 584 573 585 574 // Component attributes … … 616 605 617 606 // adress masks 618 const soclib::common::AddressMaskingTable<addr_t> 619 const soclib::common::AddressMaskingTable<addr_t> 620 const soclib::common::AddressMaskingTable<addr_t> 621 const soclib::common::AddressMaskingTable<addr_t> 607 const soclib::common::AddressMaskingTable<addr_t> m_x; 608 const soclib::common::AddressMaskingTable<addr_t> m_y; 609 const soclib::common::AddressMaskingTable<addr_t> m_z; 610 const 
soclib::common::AddressMaskingTable<addr_t> m_nline; 622 611 623 612 // broadcast address 624 uint32_t 613 uint32_t m_broadcast_boundaries; 625 614 626 615 // configuration interface constants … … 632 621 633 622 // Fifo between TGT_CMD fsm and READ fsm 634 GenericFifo<addr_t> 635 GenericFifo<size_t> 636 GenericFifo<size_t> 637 GenericFifo<size_t> 638 GenericFifo<size_t> 623 GenericFifo<addr_t> m_cmd_read_addr_fifo; 624 GenericFifo<size_t> m_cmd_read_length_fifo; 625 GenericFifo<size_t> m_cmd_read_srcid_fifo; 626 GenericFifo<size_t> m_cmd_read_trdid_fifo; 627 GenericFifo<size_t> m_cmd_read_pktid_fifo; 639 628 640 629 // Fifo between TGT_CMD fsm and WRITE fsm 641 GenericFifo<addr_t> 642 GenericFifo<bool> 643 GenericFifo<size_t> 644 GenericFifo<size_t> 645 GenericFifo<size_t> 646 GenericFifo<data_t> 647 GenericFifo<be_t> 630 GenericFifo<addr_t> m_cmd_write_addr_fifo; 631 GenericFifo<bool> m_cmd_write_eop_fifo; 632 GenericFifo<size_t> m_cmd_write_srcid_fifo; 633 GenericFifo<size_t> m_cmd_write_trdid_fifo; 634 GenericFifo<size_t> m_cmd_write_pktid_fifo; 635 GenericFifo<data_t> m_cmd_write_data_fifo; 636 GenericFifo<be_t> m_cmd_write_be_fifo; 648 637 649 638 // Fifo between TGT_CMD fsm and CAS fsm 650 GenericFifo<addr_t> 651 GenericFifo<bool> 652 GenericFifo<size_t> 653 GenericFifo<size_t> 654 GenericFifo<size_t> 655 GenericFifo<data_t> 639 GenericFifo<addr_t> m_cmd_cas_addr_fifo; 640 GenericFifo<bool> m_cmd_cas_eop_fifo; 641 GenericFifo<size_t> m_cmd_cas_srcid_fifo; 642 GenericFifo<size_t> m_cmd_cas_trdid_fifo; 643 GenericFifo<size_t> m_cmd_cas_pktid_fifo; 644 GenericFifo<data_t> m_cmd_cas_wdata_fifo; 656 645 657 646 // Fifo between CC_RECEIVE fsm and CLEANUP fsm 658 GenericFifo<uint64_t> 647 GenericFifo<uint64_t> m_cc_receive_to_cleanup_fifo; 659 648 660 649 // Fifo between CC_RECEIVE fsm and MULTI_ACK fsm 661 GenericFifo<uint64_t> 650 GenericFifo<uint64_t> m_cc_receive_to_multi_ack_fifo; 662 651 663 652 // Buffer between TGT_CMD fsm and TGT_RSP fsm … … 678 667 ////////////////////////////////////////////////// 679 668 680 sc_signal<int> 669 sc_signal<int> r_tgt_cmd_fsm; 681 670 682 671 /////////////////////////////////////////////////////// … … 684 673 /////////////////////////////////////////////////////// 685 674 686 sc_signal<int> r_config_fsm;// FSM state687 sc_signal<bool> r_config_lock;// lock protecting exclusive access688 sc_signal<int> r_config_cmd;// config request type689 sc_signal<addr_t> r_config_address;// target buffer physical address690 sc_signal<size_t> r_config_srcid;// config request srcid691 sc_signal<size_t> r_config_trdid;// config request trdid692 sc_signal<size_t> r_config_pktid;// config request pktid693 sc_signal<size_t> r_config_cmd_lines;// number of lines to be handled694 sc_signal<size_t> r_config_rsp_lines;// number of lines not completed695 sc_signal<size_t> r_config_dir_way;// DIR: selected way696 sc_signal<bool> r_config_dir_lock;// DIR: locked entry697 sc_signal<size_t> r_config_dir_count;// DIR: number of copies698 sc_signal<bool> r_config_dir_is_cnt;// DIR: counter mode (broadcast)699 sc_signal<size_t> r_config_dir_copy_srcid;// DIR: first copy SRCID700 sc_signal<bool> r_config_dir_copy_inst;// DIR: first copy L1 type701 sc_signal<size_t> r_config_dir_ptr;// DIR: index of next copy in HEAP702 sc_signal<size_t> r_config_heap_next;// current pointer to scan HEAP703 sc_signal<size_t> r_config_trt_index;// selected entry in TRT704 sc_signal<size_t> r_config_ivt_index;// selected entry in IVT675 sc_signal<int> r_config_fsm; // FSM state 676 sc_signal<bool> 
r_config_lock; // lock protecting exclusive access 677 sc_signal<int> r_config_cmd; // config request type 678 sc_signal<addr_t> r_config_address; // target buffer physical address 679 sc_signal<size_t> r_config_srcid; // config request srcid 680 sc_signal<size_t> r_config_trdid; // config request trdid 681 sc_signal<size_t> r_config_pktid; // config request pktid 682 sc_signal<size_t> r_config_cmd_lines; // number of lines to be handled 683 sc_signal<size_t> r_config_rsp_lines; // number of lines not completed 684 sc_signal<size_t> r_config_dir_way; // DIR: selected way 685 sc_signal<bool> r_config_dir_lock; // DIR: locked entry 686 sc_signal<size_t> r_config_dir_count; // DIR: number of copies 687 sc_signal<bool> r_config_dir_is_cnt; // DIR: counter mode (broadcast) 688 sc_signal<size_t> r_config_dir_copy_srcid; // DIR: first copy SRCID 689 sc_signal<bool> r_config_dir_copy_inst; // DIR: first copy L1 type 690 sc_signal<size_t> r_config_dir_ptr; // DIR: index of next copy in HEAP 691 sc_signal<size_t> r_config_heap_next; // current pointer to scan HEAP 692 sc_signal<size_t> r_config_trt_index; // selected entry in TRT 693 sc_signal<size_t> r_config_ivt_index; // selected entry in IVT 705 694 706 695 // Buffer between CONFIG fsm and IXR_CMD fsm 707 sc_signal<bool> r_config_to_ixr_cmd_req;// valid request708 sc_signal<size_t> r_config_to_ixr_cmd_index;// TRT index696 sc_signal<bool> r_config_to_ixr_cmd_req; // valid request 697 sc_signal<size_t> r_config_to_ixr_cmd_index; // TRT index 709 698 710 699 // Buffer between CONFIG fsm and TGT_RSP fsm (send a done response to L1 cache) 711 sc_signal<bool> r_config_to_tgt_rsp_req;// valid request712 sc_signal<bool> r_config_to_tgt_rsp_error;// error response713 sc_signal<size_t> r_config_to_tgt_rsp_srcid;// Transaction srcid714 sc_signal<size_t> r_config_to_tgt_rsp_trdid;// Transaction trdid715 sc_signal<size_t> r_config_to_tgt_rsp_pktid;// Transaction pktid700 sc_signal<bool> r_config_to_tgt_rsp_req; // valid request 701 sc_signal<bool> r_config_to_tgt_rsp_error; // error response 702 sc_signal<size_t> r_config_to_tgt_rsp_srcid; // Transaction srcid 703 sc_signal<size_t> r_config_to_tgt_rsp_trdid; // Transaction trdid 704 sc_signal<size_t> r_config_to_tgt_rsp_pktid; // Transaction pktid 716 705 717 706 // Buffer between CONFIG fsm and CC_SEND fsm (multi-inval / broadcast-inval) 718 sc_signal<bool> r_config_to_cc_send_multi_req; 719 sc_signal<bool> r_config_to_cc_send_brdcast_req; 720 sc_signal<addr_t> r_config_to_cc_send_nline; 721 sc_signal<size_t> r_config_to_cc_send_trdid; 722 GenericFifo<bool> m_config_to_cc_send_inst_fifo; 723 GenericFifo<size_t> m_config_to_cc_send_srcid_fifo; 707 sc_signal<bool> r_config_to_cc_send_multi_req; // multi-inval request 708 sc_signal<bool> r_config_to_cc_send_brdcast_req; // broadcast-inval request 709 sc_signal<addr_t> r_config_to_cc_send_nline; // line index 710 sc_signal<size_t> r_config_to_cc_send_trdid; // UPT index 711 GenericFifo<bool> m_config_to_cc_send_inst_fifo; // fifo for the L1 type 712 GenericFifo<size_t> m_config_to_cc_send_srcid_fifo; // fifo for owners srcid 724 713 725 714 /////////////////////////////////////////////////////// … … 727 716 /////////////////////////////////////////////////////// 728 717 729 sc_signal<int> r_read_fsm; 730 sc_signal<size_t> r_read_copy; 731 sc_signal<size_t> r_read_copy_cache; 732 sc_signal<bool> r_read_copy_inst; 733 sc_signal<tag_t> r_read_tag; 734 sc_signal<bool> r_read_is_cnt; 735 sc_signal<bool> r_read_lock; 736 sc_signal<bool> r_read_dirty; 737 
sc_signal<size_t> r_read_count; 738 sc_signal<size_t> r_read_ptr; 739 sc_signal<data_t> * r_read_data; 740 sc_signal<size_t> r_read_way; 741 sc_signal<size_t> r_read_trt_index; 742 sc_signal<size_t> r_read_next_ptr; 743 sc_signal<bool> r_read_last_free; 744 sc_signal<addr_t> r_read_ll_key; 718 sc_signal<int> r_read_fsm; // FSM state 719 sc_signal<size_t> r_read_copy; // Srcid of the first copy 720 sc_signal<size_t> r_read_copy_cache; // Srcid of the first copy 721 sc_signal<bool> r_read_copy_inst; // Type of the first copy 722 sc_signal<tag_t> r_read_tag; // cache line tag (in directory) 723 sc_signal<bool> r_read_is_cnt; // is_cnt bit (in directory) 724 sc_signal<bool> r_read_lock; // lock bit (in directory) 725 sc_signal<bool> r_read_dirty; // dirty bit (in directory) 726 sc_signal<size_t> r_read_count; // number of copies 727 sc_signal<size_t> r_read_ptr; // pointer to the heap 728 sc_signal<data_t> * r_read_data; // data (one cache line) 729 sc_signal<size_t> r_read_way; // associative way (in cache) 730 sc_signal<size_t> r_read_trt_index; // Transaction Table index 731 sc_signal<size_t> r_read_next_ptr; // Next entry to point to 732 sc_signal<bool> r_read_last_free; // Last free entry 733 sc_signal<addr_t> r_read_ll_key; // LL key from llsc_global_table 745 734 746 735 // Buffer between READ fsm and IXR_CMD fsm 747 sc_signal<bool> r_read_to_ixr_cmd_req; 748 sc_signal<size_t> r_read_to_ixr_cmd_index; 736 sc_signal<bool> r_read_to_ixr_cmd_req; // valid request 737 sc_signal<size_t> r_read_to_ixr_cmd_index; // TRT index 749 738 750 739 // Buffer between READ fsm and TGT_RSP fsm (send a hit read response to L1 cache) 751 sc_signal<bool> r_read_to_tgt_rsp_req; 752 sc_signal<size_t> r_read_to_tgt_rsp_srcid; 753 sc_signal<size_t> r_read_to_tgt_rsp_trdid; 754 sc_signal<size_t> r_read_to_tgt_rsp_pktid; 755 sc_signal<data_t> * r_read_to_tgt_rsp_data; 756 sc_signal<size_t> r_read_to_tgt_rsp_word; 757 sc_signal<size_t> r_read_to_tgt_rsp_length; 758 sc_signal<addr_t> r_read_to_tgt_rsp_ll_key; 740 sc_signal<bool> r_read_to_tgt_rsp_req; // valid request 741 sc_signal<size_t> r_read_to_tgt_rsp_srcid; // Transaction srcid 742 sc_signal<size_t> r_read_to_tgt_rsp_trdid; // Transaction trdid 743 sc_signal<size_t> r_read_to_tgt_rsp_pktid; // Transaction pktid 744 sc_signal<data_t> * r_read_to_tgt_rsp_data; // data (one cache line) 745 sc_signal<size_t> r_read_to_tgt_rsp_word; // first word of the response 746 sc_signal<size_t> r_read_to_tgt_rsp_length; // length of the response 747 sc_signal<addr_t> r_read_to_tgt_rsp_ll_key; // LL key from llsc_global_table 759 748 760 749 //RWT: Buffer between READ fsm and CC_SEND fsm (send inval) … … 777 766 778 767 //RWT: 779 sc_signal<bool> r_read_coherent; 768 sc_signal<bool> r_read_coherent; // State of the cache slot after transaction 780 769 sc_signal<bool> r_read_ll_done; 781 770 … … 839 828 840 829 // RWT: Buffer between WRITE fsm and CLEANUP fsm (change slot state) 841 sc_signal<bool> r_write_to_cleanup_req; 842 sc_signal<addr_t> r_write_to_cleanup_nline; 830 sc_signal<bool> r_write_to_cleanup_req; // valid request 831 sc_signal<addr_t> r_write_to_cleanup_nline; // cache line index 843 832 844 833 // RWT 845 sc_signal<bool> r_write_coherent; 834 sc_signal<bool> r_write_coherent; // cache slot state after transaction 846 835 847 836 //Buffer between WRITE fsm and CC_SEND fsm (INVAL for RWT) … … 1114 1103 1115 1104 sc_signal<bool> r_cleanup_ncc; 1116 sc_signal<bool> r_cleanup_to_ixr_cmd_ncc_l1_dirty;1117 sc_signal<bool> 
r_xram_rsp_to_ixr_cmd_inval_ncc_pending;1118 1105 1119 1106 sc_signal<bool> r_cleanup_to_ixr_cmd_req; -
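Editorial note, not part of the changeset: a sketch of how this component is typically instantiated, following the constructor declared above. The template argument order, the mapping tables maptab_d / maptab_x, the IntTab indexes, the geometry values and the clock/reset signals are all placeholders chosen for illustration.

    // vci_param_int / vci_param_ext and the DSPIN widths come from the platform.
    VciMemCache<vci_param_int, vci_param_ext, dspin_in_width, dspin_out_width>
        memc("memc",
             maptab_d,        // mapping table of the direct (internal) network
             maptab_x,        // mapping table of the external (XRAM) network
             IntTab(0),       // srcid on the external network
             IntTab(0, 1),    // tgtid on the direct network
             4, 4,            // x_width / y_width
             16, 256, 16,     // nways / nsets / nwords
             8);              // max_copies; heap_size, trt_lines, upt_lines,
                              // ivt_lines and the debug arguments keep their
                              // declared default values

    memc.p_clk(signal_clk);
    memc.p_resetn(signal_resetn);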
branches/RWT/modules/vci_mem_cache/caba/source/include/xram_transaction.h
r814 r823 15 15 class TransactionTabEntry 16 16 { 17 typedef sc_dt::sc_uint<64> 18 typedef sc_dt::sc_uint<40> 19 typedef uint32_t 20 typedef uint32_t 17 typedef sc_dt::sc_uint<64> wide_data_t; 18 typedef sc_dt::sc_uint<40> addr_t; 19 typedef uint32_t data_t; 20 typedef uint32_t be_t; 21 21 22 22 public: … … 154 154 class TransactionTab 155 155 { 156 typedef sc_dt::sc_uint<64> 157 typedef sc_dt::sc_uint<40> 158 typedef uint32_t 159 typedef uint32_t 156 typedef sc_dt::sc_uint<64> wide_data_t; 157 typedef sc_dt::sc_uint<40> addr_t; 158 typedef uint32_t data_t; 159 typedef uint32_t be_t; 160 160 161 161 private: 162 const std::string tab_name; 163 size_t size_tab; 162 const std::string tab_name; // the name for logs 163 size_t size_tab; // the size of the tab 164 164 165 165 data_t be_to_mask(be_t be) 166 166 { 167 167 data_t ret = 0; 168 if ( be&0x1) {168 if (be & 0x1) { 169 169 ret = ret | 0x000000FF; 170 170 } 171 if ( be&0x2) {171 if (be & 0x2) { 172 172 ret = ret | 0x0000FF00; 173 173 } 174 if ( be&0x4) {174 if (be & 0x4) { 175 175 ret = ret | 0x00FF0000; 176 176 } 177 if ( be&0x8) {177 if (be & 0x8) { 178 178 ret = ret | 0xFF000000; 179 179 } … … 189 189 TransactionTab() 190 190 { 191 size_tab =0;192 tab =NULL;191 size_tab = 0; 192 tab = NULL; 193 193 } 194 194 … … 196 196 size_t n_entries, 197 197 size_t n_words ) 198 : tab_name( name),199 size_tab( n_entries)198 : tab_name(name), 199 size_tab(n_entries) 200 200 { 201 201 tab = new TransactionTabEntry[size_tab]; 202 for ( size_t i=0; i<size_tab; i++)202 for (size_t i = 0; i < size_tab; i++) 203 203 { 204 204 tab[i].alloc(n_words); … … 222 222 void init() 223 223 { 224 for ( size_t i=0; i<size_tab; i++)224 for (size_t i = 0; i < size_tab; i++) 225 225 { 226 226 tab[i].init(); … … 234 234 void print(const size_t index) 235 235 { 236 assert( 236 assert((index < size_tab) and 237 237 "MEMC ERROR: The selected entry is out of range in TRT write_data_mask()"); 238 238 … … 247 247 TransactionTabEntry read(const size_t index) 248 248 { 249 assert( 249 assert((index < size_tab) and 250 250 "MEMC ERROR: Invalid Transaction Tab Entry"); 251 251 … … 260 260 bool full(size_t &index) 261 261 { 262 for (size_t i=0; i<size_tab; i++)263 { 264 if (!tab[i].valid)262 for (size_t i = 0; i < size_tab; i++) 263 { 264 if (!tab[i].valid) 265 265 { 266 index =i;266 index = i; 267 267 return false; 268 268 } … … 278 278 // The function returns true if a read request has already been sent 279 279 ////////////////////////////////////////////////////////////////////// 280 bool hit_read(const addr_t nline, size_t &index)281 { 282 for (size_t i=0; i<size_tab; i++)283 { 284 if ((tab[i].valid && (nline==tab[i].nline)) && (tab[i].xram_read))280 bool hit_read(const addr_t nline, size_t &index) 281 { 282 for (size_t i = 0; i < size_tab; i++) 283 { 284 if ((tab[i].valid && (nline == tab[i].nline)) && (tab[i].xram_read)) 285 285 { 286 index =i;286 index = i; 287 287 return true; 288 288 } … … 299 299 bool hit_write(const addr_t nline) 300 300 { 301 for (size_t i=0; i<size_tab; i++)302 { 303 if(tab[i].valid && (nline ==tab[i].nline) && !(tab[i].xram_read))301 for (size_t i = 0; i < size_tab; i++) 302 { 303 if(tab[i].valid && (nline == tab[i].nline) && !(tab[i].xram_read)) 304 304 { 305 305 return true; … … 319 319 bool hit_write(const addr_t nline, size_t* index) 320 320 { 321 for (size_t i=0; i<size_tab; i++){322 if (tab[i].valid && (nline==tab[i].nline) && !(tab[i].xram_read)) {321 for (size_t i = 0; i < size_tab; i++){ 322 if (tab[i].valid && (nline == tab[i].nline) && 
!(tab[i].xram_read)) { 323 323 *index = i; 324 324 return true; … … 337 337 ///////////////////////////////////////////////////////////////////// 338 338 void write_data_mask(const size_t index, 339 const std::vector<be_t> &be,340 const std::vector<data_t> &data)341 { 342 assert( 339 const std::vector<be_t> &be, 340 const std::vector<data_t> &data) 341 { 342 assert((index < size_tab) and 343 343 "MEMC ERROR: The selected entry is out of range in TRT write_data_mask()"); 344 344 345 assert( (be.size()==tab[index].wdata_be.size()) and345 assert((be.size() == tab[index].wdata_be.size()) and 346 346 "MEMC ERROR: Bad be size in TRT write_data_mask()"); 347 347 348 assert( (data.size()==tab[index].wdata.size()) and348 assert((data.size() == tab[index].wdata.size()) and 349 349 "MEMC ERROR: Bad data size in TRT write_data_mask()"); 350 350 351 for (size_t i=0; i<tab[index].wdata_be.size(); i++)351 for (size_t i = 0; i < tab[index].wdata_be.size(); i++) 352 352 { 353 353 tab[index].wdata_be[i] = tab[index].wdata_be[i] | be[i]; … … 375 375 ///////////////////////////////////////////////////////////////////// 376 376 void set(const size_t index, 377 const bool xram_read,378 const addr_t nline,379 const size_t srcid,380 const size_t trdid,381 const size_t pktid,382 const bool proc_read,383 const size_t read_length,384 const size_t word_index,385 const std::vector<be_t> &data_be,386 const std::vector<data_t> &data,387 const data_t ll_key = 0,388 const bool config = false)389 { 390 assert( 377 const bool xram_read, 378 const addr_t nline, 379 const size_t srcid, 380 const size_t trdid, 381 const size_t pktid, 382 const bool proc_read, 383 const size_t read_length, 384 const size_t word_index, 385 const std::vector<be_t> &data_be, 386 const std::vector<data_t> &data, 387 const data_t ll_key = 0, 388 const bool config = false) 389 { 390 assert((index < size_tab) and 391 391 "MEMC ERROR: The selected entry is out of range in TRT set()"); 392 392 393 assert( (data_be.size()==tab[index].wdata_be.size()) and393 assert((data_be.size() == tab[index].wdata_be.size()) and 394 394 "MEMC ERROR: Bad data_be argument in TRT set()"); 395 395 396 assert( (data.size()==tab[index].wdata.size()) and396 assert((data.size() == tab[index].wdata.size()) and 397 397 "MEMC ERROR: Bad data argument in TRT set()"); 398 398 … … 408 408 tab[index].ll_key = ll_key; 409 409 tab[index].config = config; 410 for (size_t i=0; i<tab[index].wdata.size(); i++)411 { 412 tab[index].wdata_be[i] 413 tab[index].wdata[i] 410 for (size_t i = 0; i < tab[index].wdata.size(); i++) 411 { 412 tab[index].wdata_be[i] = data_be[i]; 413 tab[index].wdata[i] = data[i]; 414 414 } 415 415 } … … 429 429 const bool rerror) 430 430 { 431 data_t 432 data_t 433 434 assert( 431 data_t value; 432 data_t mask; 433 434 assert((index < size_tab) and 435 435 "MEMC ERROR: The selected entry is out of range in TRT write_rsp()"); 436 436 437 assert( 437 assert((word < tab[index].wdata_be.size()) and 438 438 "MEMC ERROR: Bad word index in TRT write_rsp()"); 439 439 440 assert( 440 assert((tab[index].valid) and 441 441 "MEMC ERROR: TRT entry not valid in TRT write_rsp()"); 442 442 443 assert( 443 assert((tab[index].xram_read ) and 444 444 "MEMC ERROR: TRT entry is not a GET in TRT write_rsp()"); 445 445 446 if ( rerror)446 if (rerror) 447 447 { 448 448 tab[index].rerror = true; … … 456 456 457 457 // second 32 bits word 458 value = (data_t)(data >>32);459 mask = be_to_mask(tab[index].wdata_be[word +1]);460 tab[index].wdata[word +1] = (tab[index].wdata[word+1] & mask) | 
(value & ~mask);458 value = (data_t)(data >> 32); 459 mask = be_to_mask(tab[index].wdata_be[word + 1]); 460 tab[index].wdata[word + 1] = (tab[index].wdata[word + 1] & mask) | (value & ~mask); 461 461 } 462 462 ///////////////////////////////////////////////////////////////////// … … 467 467 void erase(const size_t index) 468 468 { 469 assert( 469 assert((index < size_tab) and 470 470 "MEMC ERROR: The selected entry is out of range in TRT erase()"); 471 471 … … 480 480 bool is_config(const size_t index) 481 481 { 482 assert( 482 assert((index < size_tab) and 483 483 "MEMC ERROR: The selected entry is out of range in TRT is_config()"); 484 484 -
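Editorial note, not part of the changeset: to make the byte masking in TransactionTab::write_rsp() above easier to follow, a small self-contained sketch of the same rule. Bytes whose byte-enable bit is set keep the value already written by the processor, the other bytes take the XRAM response, and the 64-bit flit is split into two 32-bit words. The numeric values are hypothetical.

    #include <cstdint>
    #include <cassert>

    static uint32_t be_to_mask(uint32_t be)    // same rule as in the TRT
    {
        uint32_t mask = 0;
        if (be & 0x1) mask |= 0x000000FF;
        if (be & 0x2) mask |= 0x0000FF00;
        if (be & 0x4) mask |= 0x00FF0000;
        if (be & 0x8) mask |= 0xFF000000;
        return mask;
    }

    int main()
    {
        uint64_t data     = 0x1122334455667788ULL; // 64-bit XRAM response flit
        uint32_t wdata    = 0x000000AB;            // word already holds one CPU byte
        uint32_t wdata_be = 0x1;                   // only byte 0 was written locally

        // First 32-bit word: keep byte 0 from the processor, rest from XRAM.
        uint32_t mask  = be_to_mask(wdata_be);
        uint32_t value = (uint32_t) data;
        wdata = (wdata & mask) | (value & ~mask);
        assert(wdata == 0x556677AB);

        // Second 32-bit word: no byte-enable bit set, so it is fully
        // overwritten by the upper half of the flit.
        uint32_t wdata1 = (uint32_t) (data >> 32);
        assert(wdata1 == 0x11223344);
        return 0;
    }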
branches/RWT/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
r814 r823 1 1 /* -*- c++ -*- 2 * 3 * File : vci_mem_cache.cpp 4 * Date : 30/10/2008 5 * Copyright : UPMC / LIP6 6 * Authors : Alain Greiner / Eric Guthmuller 7 * 8 * SOCLIB_LGPL_HEADER_BEGIN 9 * 10 * This file is part of SoCLib, GNU LGPLv2.1. 11 * 12 * SoCLib is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU Lesser General Public License as published 14 * by the Free Software Foundation; version 2.1 of the License. 15 * 16 * SoCLib is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * Lesser General Public License for more details. 20 * 21 * You should have received a copy of the GNU Lesser General Public 22 * License along with SoCLib; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 24 * 02110-1301 USA 25 * 26 * SOCLIB_LGPL_HEADER_END 27 * 28 * Maintainers: alain.greiner@lip6.fr 29 * eric.guthmuller@polytechnique.edu 30 * cesar.fuguet-tortolero@lip6.fr 31 * alexandre.joannou@lip6.fr 32 */ 2 * 3 * File : vci_mem_cache.cpp 4 * Date : 30/10/2008 5 * Copyright : UPMC / LIP6 6 * Authors : Alain Greiner / Eric Guthmuller 7 * 8 * SOCLIB_LGPL_HEADER_BEGIN 9 * 10 * This file is part of SoCLib, GNU LGPLv2.1. 11 * 12 * SoCLib is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU Lesser General Public License as published 14 * by the Free Software Foundation; version 2.1 of the License. 15 * 16 * SoCLib is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * Lesser General Public License for more details. 
20 * 21 * You should have received a copy of the GNU Lesser General Public 22 * License along with SoCLib; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 24 * 02110-1301 USA 25 * 26 * SOCLIB_LGPL_HEADER_END 27 * 28 * Maintainers: alain.greiner@lip6.fr 29 * eric.guthmuller@polytechnique.edu 30 * cesar.fuguet-tortolero@lip6.fr 31 * alexandre.joannou@lip6.fr 32 */ 33 33 34 34 35 #include "../include/vci_mem_cache.h" 36 #include "mem_cache.h" 35 37 36 38 ////// debug services ///////////////////////////////////////////////////////////// … … 96 98 "CC_RECEIVE_IDLE", 97 99 "CC_RECEIVE_CLEANUP", 98 "CC_RECEIVE_CLEANUP_EOP",99 100 "CC_RECEIVE_MULTI_ACK" 100 101 }; … … 203 204 { 204 205 "IXR_RSP_IDLE", 205 "IXR_RSP_ACK",206 206 "IXR_RSP_TRT_ERASE", 207 207 "IXR_RSP_TRT_READ" … … 355 355 356 356 tmpl(/**/) ::VciMemCache( 357 sc_module_name 358 const MappingTable &mtp,// mapping table for direct network359 const MappingTable &mtx,// mapping table for external network360 const IntTab &srcid_x,// global index on external network361 const IntTab &tgtid_d,// global index on direct network362 const size_t x_width,// number of x bits in platform363 const size_t y_width,// number of x bits in platform364 const size_t nways,// number of ways per set365 const size_t nsets,// number of associative sets366 const size_t nwords,// number of words in cache line367 const size_t max_copies,// max number of copies in heap368 const size_t heap_size,// number of heap entries369 const size_t trt_lines,// number of TRT entries370 const size_t upt_lines,// number of UPT entries371 const size_t ivt_lines,// number of IVT entries372 const size_t 373 const bool 357 sc_module_name name, 358 const MappingTable &mtp, // mapping table for direct network 359 const MappingTable &mtx, // mapping table for external network 360 const IntTab &srcid_x, // global index on external network 361 const IntTab &tgtid_d, // global index on direct network 362 const size_t x_width, // number of x bits in platform 363 const size_t y_width, // number of x bits in platform 364 const size_t nways, // number of ways per set 365 const size_t nsets, // number of associative sets 366 const size_t nwords, // number of words in cache line 367 const size_t max_copies, // max number of copies in heap 368 const size_t heap_size, // number of heap entries 369 const size_t trt_lines, // number of TRT entries 370 const size_t upt_lines, // number of UPT entries 371 const size_t ivt_lines, // number of IVT entries 372 const size_t debug_start_cycle, 373 const bool debug_ok) 374 374 375 375 : soclib::caba::BaseModule(name), 376 376 377 p_clk( "p_clk"),378 p_resetn( "p_resetn"),379 p_irq( "p_irq"),380 p_vci_tgt( "p_vci_tgt"),381 p_vci_ixr( "p_vci_ixr"),382 p_dspin_p2m( "p_dspin_p2m"),383 p_dspin_m2p( "p_dspin_m2p"),384 p_dspin_clack( "p_dspin_clack"),377 p_clk("p_clk"), 378 p_resetn("p_resetn"), 379 p_irq("p_irq"), 380 p_vci_tgt("p_vci_tgt"), 381 p_vci_ixr("p_vci_ixr"), 382 p_dspin_p2m("p_dspin_p2m"), 383 p_dspin_m2p("p_dspin_m2p"), 384 p_dspin_clack("p_dspin_clack"), 385 385 386 386 m_seglist(mtp.getSegmentList(tgtid_d)), 387 387 m_nseg(0), 388 m_srcid_x( 388 m_srcid_x(mtx.indexForId(srcid_x)), 389 389 m_initiators(1 << vci_param_int::S), 390 390 m_heap_size(heap_size), … … 394 394 m_x_width(x_width), 395 395 m_y_width(y_width), 396 m_debug_start_cycle( debug_start_cycle),397 m_debug_ok( debug_ok),396 m_debug_start_cycle(debug_start_cycle), 397 m_debug_ok(debug_ok), 398 398 m_trt_lines(trt_lines), 399 399 
m_trt(this->name(), trt_lines, nwords), … … 404 404 m_cache_data(nways, nsets, nwords), 405 405 m_heap(m_heap_size), 406 m_max_copies( max_copies),406 m_max_copies(max_copies), 407 407 m_llsc_table(), 408 408 … … 419 419 420 420 // CONFIG interface 421 m_config_addr_mask((1 <<12)-1),421 m_config_addr_mask((1 << 12) - 1), 422 422 423 423 m_config_regr_width(7), 424 424 m_config_func_width(3), 425 m_config_regr_idx_mask((1 <<m_config_regr_width)-1),426 m_config_func_idx_mask((1 <<m_config_func_width)-1),425 m_config_regr_idx_mask((1 << m_config_regr_width) - 1), 426 m_config_func_idx_mask((1 << m_config_func_width) - 1), 427 427 428 428 // FIFOs … … 453 453 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), 454 454 455 r_config_fsm( "r_config_fsm"),456 457 m_config_to_cc_send_inst_fifo( "m_config_to_cc_send_inst_fifo", 8),458 m_config_to_cc_send_srcid_fifo( "m_config_to_cc_send_srcid_fifo", 8),459 460 r_read_fsm( "r_read_fsm"),461 462 r_write_fsm( "r_write_fsm"),455 r_config_fsm("r_config_fsm"), 456 457 m_config_to_cc_send_inst_fifo("m_config_to_cc_send_inst_fifo", 8), 458 m_config_to_cc_send_srcid_fifo("m_config_to_cc_send_srcid_fifo", 8), 459 460 r_read_fsm("r_read_fsm"), 461 462 r_write_fsm("r_write_fsm"), 463 463 464 464 m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo",8), … … 495 495 r_alloc_heap_reset_cpt("r_alloc_heap_reset_cpt") 496 496 #if MONITOR_MEMCACHE_FSM == 1 497 497 , 498 498 p_read_fsm("p_read_fsm"), 499 499 p_write_fsm("p_write_fsm"), … … 530 530 531 531 // check internal and external data width 532 assert( (vci_param_int::B == 4) and532 assert((vci_param_int::B == 4) and 533 533 "MEMC ERROR : VCI internal data width must be 32 bits"); 534 534 535 assert( 535 assert((vci_param_ext::B == 8) and 536 536 "MEMC ERROR : VCI external data width must be 64 bits"); 537 537 538 538 // Check coherence between internal & external addresses 539 assert( 539 assert((vci_param_int::N == vci_param_ext::N) and 540 540 "MEMC ERROR : VCI internal & external addresses must have the same width"); 541 541 … … 544 544 size_t i = 0; 545 545 546 for (seg = m_seglist.begin(); seg != m_seglist.end(); seg++)546 for (seg = m_seglist.begin(); seg != m_seglist.end(); seg++) 547 547 { 548 548 std::cout << " => segment " << seg->name() … … 552 552 } 553 553 554 assert( 554 assert((m_nseg > 0) and 555 555 "MEMC ERROR : At least one segment must be mapped to this component"); 556 556 557 557 m_seg = new soclib::common::Segment*[m_nseg]; 558 558 559 for (seg = m_seglist.begin() ; seg != m_seglist.end(); seg++)560 { 561 if ( seg->special()) m_seg_config = i;559 for (seg = m_seglist.begin(); seg != m_seglist.end(); seg++) 560 { 561 if (seg->special()) m_seg_config = i; 562 562 m_seg[i] = & (*seg); 563 563 i++; … … 574 574 r_xram_rsp_victim_data = new sc_signal<data_t>[nwords]; 575 575 r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 576 //r_xram_rsp_to_ixr_cmd_data = new sc_signal<data_t>[nwords];577 576 578 577 // Allocation for READ FSM … … 585 584 r_write_to_cc_send_data = new sc_signal<data_t>[nwords]; 586 585 r_write_to_cc_send_be = new sc_signal<be_t>[nwords]; 587 //r_write_to_ixr_cmd_data = new sc_signal<data_t>[nwords];588 586 589 587 // Allocation for CAS FSM 590 //r_cas_to_ixr_cmd_data = new sc_signal<data_t>[nwords];591 588 r_cas_data = new sc_signal<data_t>[nwords]; 592 589 r_cas_rdata = new sc_signal<data_t>[2]; … … 624 621 bool data_change = false; 625 622 626 if ( entry.valid)623 if (entry.valid) 627 624 { 628 for ( size_t word = 0 ; word<m_words ; word++)625 for (size_t word = 0; word < 
m_words; word++) 629 626 { 630 627 m_debug_data[word] = m_cache_data.read(way, set, word); 631 if ( 632 (m_debug_data[word] != m_debug_previous_data[word]))628 if (m_debug_previous_valid and 629 (m_debug_data[word] != m_debug_previous_data[word])) 633 630 { 634 631 data_change = true; … … 638 635 639 636 // print values if any change 640 if ( 641 642 (entry.valid and (entry.dirty != m_debug_previous_dirty)) or data_change)637 if ((entry.valid != m_debug_previous_valid) or 638 (entry.valid and (entry.count != m_debug_previous_count)) or 639 (entry.valid and (entry.dirty != m_debug_previous_dirty)) or data_change) 643 640 { 644 641 std::cout << "Monitor MEMC " << name() … … 672 669 m_debug_previous_valid = entry.valid; 673 670 m_debug_previous_dirty = entry.dirty; 674 for ( size_t word=0 ; word<m_words ; word++)671 for (size_t word = 0; word < m_words; word++) 675 672 m_debug_previous_data[word] = m_debug_data[word]; 676 673 } … … 814 811 815 812 ////////////////////////////////////////////////// 816 tmpl(void)::print_trace( size_t detailed)813 tmpl(void)::print_trace(size_t detailed) 817 814 ////////////////////////////////////////////////// 818 815 { … … 837 834 << " | " << alloc_heap_fsm_str[r_alloc_heap_fsm.read()] << std::endl; 838 835 839 if ( detailed) m_trt.print(0);836 if (detailed) m_trt.print(0); 840 837 } 841 838 … … 910 907 std::cout << "*** MEM_CACHE " << name() << std::endl; 911 908 std::cout << "**********************************" << std::dec << std::endl; 912 if (activity_counters) { 909 if (activity_counters) 910 { 913 911 std::cout << "----------------------------------" << std::dec << std::endl; 914 912 std::cout << "--- Activity Counters ---" << std::dec << std::endl; … … 1026 1024 1027 1025 // RESET 1028 if (!p_resetn.read())1026 if (!p_resetn.read()) 1029 1027 { 1030 1028 … … 1075 1073 m_cmd_write_data_fifo.init(); 1076 1074 1077 m_cmd_cas_addr_fifo.init() 1078 m_cmd_cas_srcid_fifo.init() 1079 m_cmd_cas_trdid_fifo.init() 1080 m_cmd_cas_pktid_fifo.init() 1081 m_cmd_cas_wdata_fifo.init() 1082 m_cmd_cas_eop_fifo.init() 1075 m_cmd_cas_addr_fifo.init(); 1076 m_cmd_cas_srcid_fifo.init(); 1077 m_cmd_cas_trdid_fifo.init(); 1078 m_cmd_cas_pktid_fifo.init(); 1079 m_cmd_cas_wdata_fifo.init(); 1080 m_cmd_cas_eop_fifo.init(); 1083 1081 1084 1082 r_config_cmd = MEMC_CMD_NOP; … … 1095 1093 r_read_to_cleanup_req = false; 1096 1094 1097 r_write_to_tgt_rsp_req 1098 r_write_to_ixr_cmd_req 1099 r_write_to_cc_send_multi_req 1100 r_write_to_cc_send_brdcast_req 1101 r_write_to_multi_ack_req 1095 r_write_to_tgt_rsp_req = false; 1096 r_write_to_ixr_cmd_req = false; 1097 r_write_to_cc_send_multi_req = false; 1098 r_write_to_cc_send_brdcast_req = false; 1099 r_write_to_multi_ack_req = false; 1102 1100 1103 1101 m_write_to_cc_send_inst_fifo.init(); … … 1108 1106 m_cc_receive_to_cleanup_fifo.init(); 1109 1107 1110 r_multi_ack_to_tgt_rsp_req 1108 r_multi_ack_to_tgt_rsp_req = false; 1111 1109 1112 1110 m_cc_receive_to_multi_ack_fifo.init(); … … 1121 1119 m_cas_to_cc_send_inst_fifo.init(); 1122 1120 m_cas_to_cc_send_srcid_fifo.init(); 1123 #if L1_MULTI_CACHE 1124 m_cas_to_cc_send_cache_id_fifo.init(); 1125 #endif 1126 1127 for(size_t i=0; i<m_trt_lines ; i++) 1121 1122 for (size_t i = 0; i < m_trt_lines; i++) 1128 1123 { 1129 1124 r_ixr_rsp_to_xram_rsp_rok[i] = false; … … 1152 1147 r_cleanup_contains_data = false; 1153 1148 r_cleanup_ncc = false; 1154 r_cleanup_to_ixr_cmd_ncc_l1_dirty = false;1155 r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false;1156 1149 r_cleanup_to_ixr_cmd_req = false; 1157 1150 
r_cleanup_to_ixr_cmd_srcid = 0; … … 1166 1159 1167 1160 // Activity counters 1161 m_cpt_cycles = 0; 1168 1162 m_cpt_reset_count = 0; 1169 m_cpt_cycles = 0;1170 1163 m_cpt_read_local = 0; 1171 1164 m_cpt_read_remote = 0; … … 1220 1213 m_cpt_put = 0; 1221 1214 1222 m_cpt_ncc_to_cc_read = 0; 1223 m_cpt_ncc_to_cc_write = 0; 1215 m_cpt_ncc_to_cc_read = 0; 1216 m_cpt_ncc_to_cc_write = 0; 1217 1218 m_cpt_dir_unused = 0; 1219 m_cpt_upt_unused = 0; 1220 m_cpt_ivt_unused = 0; 1221 m_cpt_heap_unused = 0; 1222 m_cpt_trt_unused = 0; 1224 1223 1225 1224 m_cpt_heap_min_slot_available = m_heap_size; 1226 1225 m_cpt_heap_slot_available = m_heap_size; 1227 1226 1228 m_cpt_dir_unused = 0;1229 m_cpt_upt_unused = 0;1230 m_cpt_ivt_unused = 0;1231 m_cpt_heap_unused = 0;1232 m_cpt_trt_unused = 0;1233 1234 // Unused1235 m_cpt_read_data_unc = 0;1236 m_cpt_read_data_miss_CC = 0;1237 m_cpt_read_ins_unc = 0;1238 m_cpt_read_ins_miss = 0;1239 m_cpt_read_ll_CC = 0;1240 m_cpt_read_data_miss_NCC = 0;1241 m_cpt_read_ll_NCC = 0;1242 1243 1227 return; 1244 1228 } 1245 1229 1246 bool 1247 bool 1248 1249 bool 1250 bool 1251 1252 bool 1253 bool 1254 1255 bool 1256 bool 1257 1258 bool 1259 bool 1260 1261 bool 1262 bool 1263 bool 1264 size_t 1265 1266 bool 1267 bool 1268 bool 1269 size_t 1230 bool cmd_read_fifo_put = false; 1231 bool cmd_read_fifo_get = false; 1232 1233 bool cmd_write_fifo_put = false; 1234 bool cmd_write_fifo_get = false; 1235 1236 bool cmd_cas_fifo_put = false; 1237 bool cmd_cas_fifo_get = false; 1238 1239 bool cc_receive_to_cleanup_fifo_get = false; 1240 bool cc_receive_to_cleanup_fifo_put = false; 1241 1242 bool cc_receive_to_multi_ack_fifo_get = false; 1243 bool cc_receive_to_multi_ack_fifo_put = false; 1244 1245 bool write_to_cc_send_fifo_put = false; 1246 bool write_to_cc_send_fifo_get = false; 1247 bool write_to_cc_send_fifo_inst = false; 1248 size_t write_to_cc_send_fifo_srcid = 0; 1249 1250 bool xram_rsp_to_cc_send_fifo_put = false; 1251 bool xram_rsp_to_cc_send_fifo_get = false; 1252 bool xram_rsp_to_cc_send_fifo_inst = false; 1253 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 1270 1254 1271 1255 bool config_rsp_lines_incr = false; … … 1273 1257 bool config_rsp_lines_ixr_rsp_decr = false; 1274 1258 1275 bool 1276 bool 1277 bool 1278 size_t 1279 1280 bool 1281 bool 1282 bool 1283 size_t 1259 bool config_to_cc_send_fifo_put = false; 1260 bool config_to_cc_send_fifo_get = false; 1261 bool config_to_cc_send_fifo_inst = false; 1262 size_t config_to_cc_send_fifo_srcid = 0; 1263 1264 bool cas_to_cc_send_fifo_put = false; 1265 bool cas_to_cc_send_fifo_get = false; 1266 bool cas_to_cc_send_fifo_inst = false; 1267 size_t cas_to_cc_send_fifo_srcid = 0; 1284 1268 1285 1269 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; 1286 1270 1287 1271 #if DEBUG_MEMC_GLOBAL 1288 if (m_debug)1272 if (m_debug) 1289 1273 { 1290 1274 std::cout … … 1336 1320 ////////////////// 1337 1321 case TGT_CMD_IDLE: // waiting a VCI command (RAM or CONFIG) 1338 if(p_vci_tgt.cmdval) 1339 { 1340 1341 1322 if (p_vci_tgt.cmdval) 1323 { 1342 1324 #if DEBUG_MEMC_TGT_CMD 1343 if(m_debug) 1325 if (m_debug) 1326 { 1344 1327 std::cout << " <MEMC " << name() 1345 1328 << " TGT_CMD_IDLE> Receive command from srcid " 1346 1329 << std::hex << p_vci_tgt.srcid.read() 1347 1330 << " / address " << std::hex << p_vci_tgt.address.read() << std::endl; 1331 } 1348 1332 #endif 1349 1333 // checking segmentation violation … … 1352 1336 bool config = false; 1353 1337 1354 for (size_t seg_id = 0; (seg_id < m_nseg); seg_id++)1338 for (size_t seg_id = 0; 
seg_id < m_nseg; seg_id++) 1355 1339 { 1356 1340 if (m_seg[seg_id]->contains(address) && 1357 m_seg[seg_id]->contains(address + plen - vci_param_int::B) 1341 m_seg[seg_id]->contains(address + plen - vci_param_int::B)) 1358 1342 { 1359 if ( m_seg[seg_id]->special()) config = true;1343 if (m_seg[seg_id]->special()) config = true; 1360 1344 } 1361 1345 } 1362 1346 1363 if (config) 1347 if (config) /////////// configuration command 1364 1348 { 1365 1349 if (!p_vci_tgt.eop.read()) r_tgt_cmd_fsm = TGT_CMD_ERROR; 1366 1350 else r_tgt_cmd_fsm = TGT_CMD_CONFIG; 1367 1351 } 1368 else 1369 { 1370 if ( p_vci_tgt.cmd.read() == vci_param_int::CMD_READ)1352 else //////////// memory access 1353 { 1354 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) 1371 1355 { 1372 1356 // check that the pktid is either : … … 1377 1361 // ==> bit2 must be zero with the TSAR encoding 1378 1362 // ==> mask = 0b0100 = 0x4 1379 assert( 1363 assert(((p_vci_tgt.pktid.read() & 0x4) == 0x0) and 1380 1364 "The type specified in the pktid field is incompatible with the READ CMD"); 1381 1365 r_tgt_cmd_fsm = TGT_CMD_READ; 1382 1366 } 1383 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE)1367 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1384 1368 { 1385 1369 // check that the pktid is TYPE_WRITE … … 1390 1374 r_tgt_cmd_fsm = TGT_CMD_WRITE; 1391 1375 } 1392 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ)1376 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1393 1377 { 1394 1378 // check that the pktid is TYPE_LL … … 1399 1383 r_tgt_cmd_fsm = TGT_CMD_READ; 1400 1384 } 1401 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP)1385 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1402 1386 { 1403 1387 // check that the pktid is either : … … 1410 1394 "The type specified in the pktid field is incompatible with the NOP CMD"); 1411 1395 1412 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) r_tgt_cmd_fsm = TGT_CMD_CAS; 1413 else r_tgt_cmd_fsm = TGT_CMD_WRITE; 1396 if ((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) 1397 { 1398 r_tgt_cmd_fsm = TGT_CMD_CAS; 1399 } 1400 else 1401 { 1402 r_tgt_cmd_fsm = TGT_CMD_WRITE; 1403 } 1414 1404 } 1415 1405 else … … 1425 1415 1426 1416 // wait if pending request 1427 if (r_tgt_cmd_to_tgt_rsp_req.read()) break;1417 if (r_tgt_cmd_to_tgt_rsp_req.read()) break; 1428 1418 1429 1419 // consume all the command packet flits before sending response error 1430 if ( p_vci_tgt.cmdval and p_vci_tgt.eop)1420 if (p_vci_tgt.cmdval and p_vci_tgt.eop) 1431 1421 { 1432 1422 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); … … 1438 1428 1439 1429 #if DEBUG_MEMC_TGT_CMD 1440 if(m_debug) 1430 if (m_debug) 1431 { 1441 1432 std::cout << " <MEMC " << name() 1442 1433 << " TGT_CMD_ERROR> Segmentation violation:" … … 1446 1437 << " / pktid = " << p_vci_tgt.pktid.read() 1447 1438 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1448 #endif 1449 1439 } 1440 #endif 1450 1441 } 1451 1442 break; … … 1453 1444 //////////////////// 1454 1445 case TGT_CMD_CONFIG: // execute config request and return response 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 // Y : SUBTYPE ( LOCAL, REMOTE, OTHER )//1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 addr_t addr_lsb = p_vci_tgt.address.read() &1501 m_config_addr_mask; 1502 1503 addr_t cell = (addr_lsb / vci_param_int::B); 1504 1505 size_t regr = cell & 1506 
m_config_regr_idx_mask;1507 1508 size_t func = (cell >> m_config_regr_width) &1509 m_config_func_idx_mask;1510 1511 bool need_rsp;1512 int error; 1513 uint32_t rdata = 0; // default value1514 uint32_t wdata = p_vci_tgt.wdata.read();1515 1516 switch(func)1517 { 1518 // memory operation1519 case MEMC_CONFIG:1446 { 1447 /////////////////////////////////////////////////////////// 1448 // Decoding CONFIG interface commands // 1449 // // 1450 // VCI ADDRESS // 1451 // ================================================ // 1452 // GLOBAL | LOCAL | ... | FUNC_IDX | REGS_IDX | 00 // 1453 // IDX | IDX | | (3 bits) | (7 bits) | // 1454 // ================================================ // 1455 // // 1456 // For instrumentation : FUNC_IDX = 0b001 // 1457 // // 1458 // REGS_IDX // 1459 // ============================================ // 1460 // Z | Y | X | W // 1461 // (1 bit) | (2 bits) | (3 bits) | (1 bit) // 1462 // ============================================ // 1463 // // 1464 // Z : DIRECT / COHERENCE // 1465 // Y : SUBTYPE (LOCAL, REMOTE, OTHER) // 1466 // X : REGISTER INDEX // 1467 // W : HI / LO // 1468 // // 1469 // For configuration: FUNC_IDX = 0b000 // 1470 // // 1471 // REGS_IDX // 1472 // ============================================ // 1473 // RESERVED | X | // 1474 // (4 bits) | (3 bits) | // 1475 // ============================================ // 1476 // // 1477 // X : REGISTER INDEX // 1478 // // 1479 // For WRITE MISS error signaling: FUNC = 0x010 // 1480 // // 1481 // REGS_IDX // 1482 // ============================================ // 1483 // RESERVED | X | // 1484 // (4 bits) | (3 bits) | // 1485 // ============================================ // 1486 // // 1487 // X : REGISTER INDEX // 1488 // // 1489 /////////////////////////////////////////////////////////// 1490 1491 addr_t addr_lsb = p_vci_tgt.address.read() & m_config_addr_mask; 1492 1493 addr_t cell = (addr_lsb / vci_param_int::B); 1494 1495 size_t regr = cell & m_config_regr_idx_mask; 1496 1497 size_t func = (cell >> m_config_regr_width) & m_config_func_idx_mask; 1498 1499 bool need_rsp; 1500 int error; 1501 uint32_t rdata = 0; // default value 1502 uint32_t wdata = p_vci_tgt.wdata.read(); 1503 1504 switch (func) 1505 { 1506 // memory operation 1507 case MEMC_CONFIG: 1508 { 1509 if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1510 and (regr == MEMC_LOCK)) 1520 1511 { 1521 if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1522 and (regr == MEMC_LOCK)) 1523 { 1524 rdata = (uint32_t) r_config_lock.read(); 1525 need_rsp = true; 1526 error = 0; 1527 r_config_lock = true; 1528 } 1529 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1530 and (regr == MEMC_LOCK)) 1531 { 1532 need_rsp = true; 1533 error = 0; 1534 r_config_lock = false; 1535 } 1536 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1537 and (regr == MEMC_ADDR_LO)) 1538 { 1539 assert( ((wdata % (m_words * vci_param_int::B)) == 0) and 1540 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1541 1542 need_rsp = true; 1543 error = 0; 1544 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1545 ((addr_t)wdata); 1546 } 1547 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1548 and (regr == MEMC_ADDR_HI)) 1549 1550 { 1551 need_rsp = true; 1552 error = 0; 1553 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1554 (((addr_t) wdata) << 32); 1555 } 1556 else if ((p_vci_tgt.cmd.read() == 
vci_param_int::CMD_WRITE) // set buf_lines 1557 and (regr == MEMC_BUF_LENGTH)) 1558 { 1559 need_rsp = true; 1560 error = 0; 1561 size_t lines = wdata / (m_words << 2); 1562 if (wdata % (m_words << 2)) lines++; 1563 r_config_cmd_lines = lines; 1564 r_config_rsp_lines = 0; 1565 } 1566 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1567 and (regr == MEMC_CMD_TYPE)) 1568 { 1569 need_rsp = false; 1570 error = 0; 1571 r_config_cmd = wdata; 1572 1573 // prepare delayed response from CONFIG FSM 1574 r_config_srcid = p_vci_tgt.srcid.read(); 1575 r_config_trdid = p_vci_tgt.trdid.read(); 1576 r_config_pktid = p_vci_tgt.pktid.read(); 1577 } 1578 else 1579 { 1580 need_rsp = true; 1581 error = 1; 1582 } 1583 1584 break; 1512 rdata = (uint32_t) r_config_lock.read(); 1513 need_rsp = true; 1514 error = 0; 1515 r_config_lock = true; 1585 1516 } 1586 1587 // instrumentation registers 1588 case MEMC_INSTRM: 1589 { 1590 need_rsp = true; 1591 1592 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) 1593 { 1594 error = read_instrumentation(regr, rdata); 1595 } 1596 else 1597 { 1598 error = 1; 1599 } 1600 1601 break; 1602 } 1603 1604 // xram GET bus error registers 1605 case MEMC_RERROR: 1517 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1518 and (regr == MEMC_LOCK)) 1606 1519 { 1607 1520 need_rsp = true; 1608 1521 error = 0; 1609 1610 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1611 { 1612 switch (regr) 1613 { 1614 case MEMC_RERROR_IRQ_ENABLE: 1615 r_xram_rsp_rerror_irq_enable = 1616 (p_vci_tgt.wdata.read() != 0); 1617 1618 break; 1619 1620 default: 1621 error = 1; 1622 break; 1623 } 1624 } 1625 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) 1626 { 1627 switch (regr) 1628 { 1629 case MEMC_RERROR_SRCID: 1630 rdata = (uint32_t) 1631 r_xram_rsp_rerror_rsrcid.read(); 1632 1633 break; 1634 1635 case MEMC_RERROR_ADDR_LO: 1636 rdata = (uint32_t) 1637 (r_xram_rsp_rerror_address.read()) & 1638 ((1ULL<<32)-1); 1639 1640 break; 1641 1642 case MEMC_RERROR_ADDR_HI: 1643 rdata = (uint32_t) 1644 (r_xram_rsp_rerror_address.read() >> 32) & 1645 ((1ULL<<32)-1); 1646 1647 break; 1648 1649 case MEMC_RERROR_IRQ_RESET: 1650 if (not r_xram_rsp_rerror_irq.read()) break; 1651 1652 r_xram_rsp_rerror_irq = false; 1653 1654 break; 1655 1656 case MEMC_RERROR_IRQ_ENABLE: 1657 rdata = (uint32_t) 1658 (r_xram_rsp_rerror_irq_enable.read()) ? 
1 : 0; 1659 1660 break; 1661 1662 default: 1663 error = 1; 1664 break; 1665 } 1666 } 1667 else 1668 { 1669 error = 1; 1670 } 1671 1672 break; 1522 r_config_lock = false; 1673 1523 } 1674 1675 //unknown function 1676 default: 1524 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1525 and (regr == MEMC_ADDR_LO)) 1526 { 1527 assert(((wdata % (m_words * vci_param_int::B)) == 0) and 1528 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1529 1530 need_rsp = true; 1531 error = 0; 1532 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1533 ((addr_t)wdata); 1534 } 1535 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1536 and (regr == MEMC_ADDR_HI)) 1537 1538 { 1539 need_rsp = true; 1540 error = 0; 1541 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1542 (((addr_t) wdata) << 32); 1543 } 1544 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1545 and (regr == MEMC_BUF_LENGTH)) 1546 { 1547 need_rsp = true; 1548 error = 0; 1549 size_t lines = wdata / (m_words << 2); 1550 if (wdata % (m_words << 2)) lines++; 1551 r_config_cmd_lines = lines; 1552 r_config_rsp_lines = 0; 1553 } 1554 else if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1555 and (regr == MEMC_CMD_TYPE)) 1556 { 1557 need_rsp = false; 1558 error = 0; 1559 r_config_cmd = wdata; 1560 1561 // prepare delayed response from CONFIG FSM 1562 r_config_srcid = p_vci_tgt.srcid.read(); 1563 r_config_trdid = p_vci_tgt.trdid.read(); 1564 r_config_pktid = p_vci_tgt.pktid.read(); 1565 } 1566 else 1677 1567 { 1678 1568 need_rsp = true; 1679 1569 error = 1; 1680 1681 break;1682 1570 } 1683 } 1684 1685 if (need_rsp) 1686 { 1687 // blocked if previous pending request to TGT_RSP FSM 1688 if (r_tgt_cmd_to_tgt_rsp_req.read()) break; 1689 1690 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1691 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1692 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1693 r_tgt_cmd_to_tgt_rsp_req = true; 1694 r_tgt_cmd_to_tgt_rsp_error = error; 1695 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1696 } 1697 1698 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1571 1572 break; 1573 } 1574 1575 // instrumentation registers 1576 case MEMC_INSTRM: 1577 { 1578 need_rsp = true; 1579 1580 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) 1581 { 1582 error = read_instrumentation(regr, rdata); 1583 } 1584 else 1585 { 1586 error = 1; 1587 } 1588 1589 break; 1590 } 1591 1592 // xram GET bus error registers 1593 case MEMC_RERROR: 1594 { 1595 need_rsp = true; 1596 error = 0; 1597 1598 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 1599 { 1600 switch (regr) 1601 { 1602 case MEMC_RERROR_IRQ_ENABLE: 1603 r_xram_rsp_rerror_irq_enable = 1604 (p_vci_tgt.wdata.read() != 0); 1605 1606 break; 1607 1608 default: 1609 error = 1; 1610 break; 1611 } 1612 } 1613 else if (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) 1614 { 1615 switch (regr) 1616 { 1617 case MEMC_RERROR_SRCID: 1618 rdata = (uint32_t) 1619 r_xram_rsp_rerror_rsrcid.read(); 1620 break; 1621 1622 case MEMC_RERROR_ADDR_LO: 1623 rdata = (uint32_t) 1624 (r_xram_rsp_rerror_address.read()) & ((1ULL << 32) - 1); 1625 break; 1626 1627 case MEMC_RERROR_ADDR_HI: 1628 rdata = (uint32_t) 1629 (r_xram_rsp_rerror_address.read() >> 32) & ((1ULL << 32) - 1); 1630 break; 1631 1632 case MEMC_RERROR_IRQ_RESET: 1633 if (not r_xram_rsp_rerror_irq.read()) break; 1634 r_xram_rsp_rerror_irq = false; 1635 break; 1636 1637 case MEMC_RERROR_IRQ_ENABLE: 1638 
rdata = (uint32_t) (r_xram_rsp_rerror_irq_enable.read()) ? 1 : 0; 1639 break; 1640 1641 default: 1642 error = 1; 1643 break; 1644 } 1645 } 1646 else 1647 { 1648 error = 1; 1649 } 1650 1651 break; 1652 } 1653 1654 //unknown function 1655 default: 1656 { 1657 need_rsp = true; 1658 error = 1; 1659 break; 1660 } 1661 } 1662 1663 if (need_rsp) 1664 { 1665 // blocked if previous pending request to TGT_RSP FSM 1666 if (r_tgt_cmd_to_tgt_rsp_req.read()) break; 1667 1668 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1669 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1670 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1671 r_tgt_cmd_to_tgt_rsp_req = true; 1672 r_tgt_cmd_to_tgt_rsp_error = error; 1673 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1674 } 1675 1676 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1699 1677 1700 1678 #if DEBUG_MEMC_TGT_CMD 1701 if (m_debug) 1702 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1703 << " address = " << std::hex << p_vci_tgt.address.read() 1704 << " / func = " << func 1705 << " / regr = " << regr 1706 << " / rdata = " << rdata 1707 << " / wdata = " << p_vci_tgt.wdata.read() 1708 << " / need_rsp = " << need_rsp 1709 << " / error = " << error << std::endl; 1710 #endif 1711 break; 1712 } 1713 1714 #if 0 1715 case TGT_CMD_CONFIG: // execute config request and return response 1716 { 1717 addr_t seg_base = m_seg[m_seg_config]->baseAddress(); 1718 addr_t address = p_vci_tgt.address.read(); 1719 size_t cell = (address - seg_base)/vci_param_int::B; 1720 1721 bool need_rsp; 1722 size_t error; 1723 uint32_t rdata = 0; // default value 1724 uint32_t wdata = p_vci_tgt.wdata.read(); 1725 1726 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock 1727 and (cell == MEMC_LOCK) ) 1728 { 1729 rdata = (uint32_t)r_config_lock.read(); 1730 need_rsp = true; 1731 error = 0; 1732 r_config_lock = true; 1733 } 1734 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1735 and (cell == MEMC_LOCK)) 1736 { 1737 need_rsp = true; 1738 error = 0; 1739 r_config_lock = false; 1740 } 1741 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1742 and (cell == MEMC_ADDR_LO)) 1743 { 1744 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1745 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1746 1747 need_rsp = true; 1748 error = 0; 1749 r_config_address = (r_config_address.read() & 0xFFFFFFFF00000000LL) | 1750 ((addr_t)wdata); 1751 } 1752 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1753 and (cell == MEMC_ADDR_HI)) 1754 { 1755 need_rsp = true; 1756 error = 0; 1757 r_config_address = (r_config_address.read() & 0x00000000FFFFFFFFLL) | 1758 (((addr_t) wdata) << 32); 1759 } 1760 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1761 and (cell == MEMC_BUF_LENGTH)) 1762 { 1763 need_rsp = true; 1764 error = 0; 1765 size_t lines = wdata / (m_words << 2); 1766 if (wdata % (m_words << 2)) lines++; 1767 r_config_cmd_lines = lines; 1768 r_config_rsp_lines = 0; 1769 } 1770 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1771 and (cell == MEMC_CMD_TYPE)) 1772 { 1773 need_rsp = false; 1774 error = 0; 1775 r_config_cmd = wdata; 1776 r_config_srcid = p_vci_tgt.srcid.read(); 1777 r_config_trdid = p_vci_tgt.trdid.read(); 1778 r_config_pktid = p_vci_tgt.pktid.read(); 1779 } 1780 else 1781 { 1782 need_rsp = true; 1783 error = 1; 1784 } 1785 1786 if ( need_rsp ) 1787 { 1788 // blocked if previous pending request to 
TGT_RSP FSM 1789 if ( r_tgt_cmd_to_tgt_rsp_req.read() ) break; 1790 1791 r_tgt_cmd_to_tgt_rsp_srcid = p_vci_tgt.srcid.read(); 1792 r_tgt_cmd_to_tgt_rsp_trdid = p_vci_tgt.trdid.read(); 1793 r_tgt_cmd_to_tgt_rsp_pktid = p_vci_tgt.pktid.read(); 1794 r_tgt_cmd_to_tgt_rsp_req = true; 1795 r_tgt_cmd_to_tgt_rsp_error = error; 1796 r_tgt_cmd_to_tgt_rsp_rdata = rdata; 1797 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1798 } 1799 else 1800 { 1801 r_tgt_cmd_fsm = TGT_CMD_IDLE; 1802 } 1803 1804 #if DEBUG_MEMC_TGT_CMD 1805 if(m_debug) 1806 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1807 << " address = " << std::hex << p_vci_tgt.address.read() 1808 << " / wdata = " << p_vci_tgt.wdata.read() 1809 << " / need_rsp = " << need_rsp 1810 << " / error = " << error << std::endl; 1811 #endif 1812 break; 1813 } 1814 #endif // #if 0 1815 ////////////////// 1679 if (m_debug) 1680 { 1681 std::cout << " <MEMC " << name() << " TGT_CMD_CONFIG> Configuration request:" 1682 << " address = " << std::hex << p_vci_tgt.address.read() 1683 << " / func = " << func 1684 << " / regr = " << regr 1685 << " / rdata = " << rdata 1686 << " / wdata = " << p_vci_tgt.wdata.read() 1687 << " / need_rsp = " << need_rsp 1688 << " / error = " << error << std::endl; 1689 } 1690 #endif 1691 break; 1692 } 1693 ////////////////// 1816 1694 case TGT_CMD_READ: // Push a read request into read fifo 1817 1695 1818 1696 // check that the read does not cross a cache line limit. 1819 if ( ((m_x[(addr_t) p_vci_tgt.address.read()]+ (p_vci_tgt.plen.read() >>2)) > 16) and1697 if (((m_x[(addr_t) p_vci_tgt.address.read()] + (p_vci_tgt.plen.read() >> 2)) > 16) and 1820 1698 (p_vci_tgt.cmd.read() != vci_param_int::CMD_LOCKED_READ)) 1821 1699 { … … 1825 1703 } 1826 1704 // check single flit 1827 if (!p_vci_tgt.eop.read())1705 if (!p_vci_tgt.eop.read()) 1828 1706 { 1829 1707 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" … … 1832 1710 } 1833 1711 // check plen for LL 1834 if ( 1835 (p_vci_tgt.plen.read() != 8) 1712 if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) and 1713 (p_vci_tgt.plen.read() != 8)) 1836 1714 { 1837 1715 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_READ state" … … 1840 1718 } 1841 1719 1842 if ( p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok())1720 if (p_vci_tgt.cmdval and m_cmd_read_addr_fifo.wok()) 1843 1721 { 1844 1722 1845 1723 #if DEBUG_MEMC_TGT_CMD 1846 if(m_debug) 1724 if (m_debug) 1725 { 1847 1726 std::cout << " <MEMC " << name() << " TGT_CMD_READ> Push into read_fifo:" 1848 1727 << " address = " << std::hex << p_vci_tgt.address.read() … … 1851 1730 << " / pktid = " << p_vci_tgt.pktid.read() 1852 1731 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1732 } 1853 1733 #endif 1854 1734 cmd_read_fifo_put = true; 1855 1735 // <Activity counters> 1856 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) { 1736 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_LOCKED_READ) 1737 { 1857 1738 if (is_local_req(p_vci_tgt.srcid.read())) 1858 1739 { … … 1864 1745 } 1865 1746 // (1 (CMD) + 2 (RSP)) VCI flits for LL => 2 + 3 dspin flits 1866 m_cpt_ll_cost += 5 * req_distance(p_vci_tgt.srcid.read()); // LL on a single word1747 m_cpt_ll_cost += 5 * req_distance(p_vci_tgt.srcid.read()); 1867 1748 } 1868 1749 else { … … 1885 1766 /////////////////// 1886 1767 case TGT_CMD_WRITE: 1887 if (p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok())1768 if (p_vci_tgt.cmdval and m_cmd_write_addr_fifo.wok()) 1888 1769 { 1889 1770 uint32_t plen = p_vci_tgt.plen.read(); 1890 1771 
#if DEBUG_MEMC_TGT_CMD 1891 if(m_debug) 1772 if (m_debug) 1773 { 1892 1774 std::cout << " <MEMC " << name() << " TGT_CMD_WRITE> Push into write_fifo:" 1893 1775 << " address = " << std::hex << p_vci_tgt.address.read() … … 1898 1780 << " / be = " << p_vci_tgt.be.read() 1899 1781 << " / plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1782 } 1900 1783 #endif 1901 1784 cmd_write_fifo_put = true; 1902 1785 // <Activity counters> 1903 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) { 1786 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1787 { 1904 1788 // (2 (CMD) + 1 (RSP)) flits VCI => 4 + (1 (success) || 2 (failure)) flits dspin 1905 1789 m_cpt_sc_cost += 5 * req_distance(p_vci_tgt.srcid.read()); … … 1919 1803 // </Activity counters> 1920 1804 1921 if (p_vci_tgt.eop) { 1805 if (p_vci_tgt.eop) 1806 { 1922 1807 // <Activity counters> 1923 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) { 1808 if (p_vci_tgt.cmd.read() == vci_param_int::CMD_NOP) 1809 { 1924 1810 if (is_local_req(p_vci_tgt.srcid.read())) 1925 1811 { … … 1949 1835 ///////////////// 1950 1836 case TGT_CMD_CAS: 1951 if ((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16))1837 if ((p_vci_tgt.plen.read() != 8) and (p_vci_tgt.plen.read() != 16)) 1952 1838 { 1953 1839 std::cout << "VCI_MEM_CACHE ERROR " << name() << " TGT_CMD_CAS state" … … 1956 1842 } 1957 1843 1958 if (p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok())1844 if (p_vci_tgt.cmdval and m_cmd_cas_addr_fifo.wok()) 1959 1845 { 1960 1846 1961 1847 #if DEBUG_MEMC_TGT_CMD 1962 if(m_debug) 1848 if (m_debug) 1849 { 1963 1850 std::cout << " <MEMC " << name() << " TGT_CMD_CAS> Pushing command into cmd_cas_fifo:" 1964 1851 << " address = " << std::hex << p_vci_tgt.address.read() … … 1969 1856 << " be = " << p_vci_tgt.be.read() 1970 1857 << " plen = " << std::dec << p_vci_tgt.plen.read() << std::endl; 1858 } 1971 1859 #endif 1972 1860 cmd_cas_fifo_put = true; 1973 if (p_vci_tgt.eop) { 1861 if (p_vci_tgt.eop) 1862 { 1974 1863 // <Activity counters> 1975 1864 if (is_local_req(p_vci_tgt.srcid.read())) … … 2005 1894 //////////////////////////////////////////////////////////////////////// 2006 1895 2007 switch (r_multi_ack_fsm.read())1896 switch (r_multi_ack_fsm.read()) 2008 1897 { 2009 1898 //////////////////// 2010 1899 case MULTI_ACK_IDLE: 2011 { 2012 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 2013 2014 // No CC_RECEIVE FSM request and no WRITE FSM request 2015 if( not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 2016 break; 2017 2018 uint8_t updt_index; 2019 2020 // handling WRITE FSM request to decrement update table response 2021 // counter if no CC_RECEIVE FSM request 2022 if(not multi_ack_fifo_rok) 2023 { 2024 updt_index = r_write_to_multi_ack_upt_index.read(); 2025 r_write_to_multi_ack_req = false; 2026 } 2027 // Handling CC_RECEIVE FSM request 1900 { 1901 bool multi_ack_fifo_rok = m_cc_receive_to_multi_ack_fifo.rok(); 1902 1903 // No CC_RECEIVE FSM request and no WRITE FSM request 1904 if (not multi_ack_fifo_rok and not r_write_to_multi_ack_req.read()) 1905 break; 1906 1907 uint8_t updt_index; 1908 1909 // handling WRITE FSM request to decrement update table response 1910 // counter if no CC_RECEIVE FSM request 1911 if (not multi_ack_fifo_rok) 1912 { 1913 updt_index = r_write_to_multi_ack_upt_index.read(); 1914 r_write_to_multi_ack_req = false; 1915 } 1916 // Handling CC_RECEIVE FSM request 1917 else 1918 { 1919 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 1920 updt_index = DspinRwtParam::dspin_get(flit, 
1921 DspinRwtParam::MULTI_ACK_UPDT_INDEX); 1922 1923 cc_receive_to_multi_ack_fifo_get = true; 1924 } 1925 1926 assert((updt_index < m_upt.size()) and 1927 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 1928 "index too large for UPT"); 1929 1930 r_multi_ack_upt_index = updt_index; 1931 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1932 1933 #if DEBUG_MEMC_MULTI_ACK 1934 if (m_debug) 1935 { 1936 if (multi_ack_fifo_rok) 1937 { 1938 std::cout << " <MEMC " << name() 1939 << " MULTI_ACK_IDLE> Response for UPT entry " 1940 << (size_t) updt_index << std::endl; 1941 } 2028 1942 else 2029 1943 { 2030 uint64_t flit = m_cc_receive_to_multi_ack_fifo.read(); 2031 updt_index = DspinRwtParam::dspin_get(flit, 2032 DspinRwtParam::MULTI_ACK_UPDT_INDEX); 2033 2034 cc_receive_to_multi_ack_fifo_get = true; 2035 } 2036 2037 assert((updt_index < m_upt.size()) and 2038 "VCI_MEM_CACHE ERROR in MULTI_ACK_IDLE : " 2039 "index too large for UPT"); 2040 2041 r_multi_ack_upt_index = updt_index; 2042 r_multi_ack_fsm = MULTI_ACK_UPT_LOCK; 1944 std::cout << " <MEMC " << name() 1945 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 1946 << updt_index << std::endl; 1947 } 1948 } 1949 #endif 1950 break; 1951 } 1952 1953 //////////////////////// 1954 case MULTI_ACK_UPT_LOCK: 1955 { 1956 // get lock to the UPDATE table 1957 if (r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 1958 1959 // decrement the number of expected responses 1960 size_t count = 0; 1961 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 1962 1963 if (not valid) 1964 { 1965 std::cout << "VCI_MEM_CACHE ERROR " << name() 1966 << " MULTI_ACK_UPT_LOCK state" << std::endl 1967 << "unsuccessful access to decrement the UPT" << std::endl; 1968 exit(0); 1969 } 1970 1971 if (count == 0) 1972 { 1973 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 1974 } 1975 else 1976 { 1977 r_multi_ack_fsm = MULTI_ACK_IDLE; 1978 } 2043 1979 2044 1980 #if DEBUG_MEMC_MULTI_ACK 2045 if(m_debug) 2046 { 2047 if (multi_ack_fifo_rok) 2048 { 2049 std::cout << " <MEMC " << name() 2050 << " MULTI_ACK_IDLE> Response for UPT entry " 2051 << (size_t)updt_index << std::endl; 2052 } 2053 else 2054 { 2055 std::cout << " <MEMC " << name() 2056 << " MULTI_ACK_IDLE> Write FSM request to decrement UPT entry " 2057 << updt_index << std::endl; 2058 } 2059 } 2060 #endif 2061 break; 2062 } 2063 2064 //////////////////////// 2065 case MULTI_ACK_UPT_LOCK: 2066 { 2067 // get lock to the UPDATE table 2068 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) break; 2069 2070 // decrement the number of expected responses 2071 size_t count = 0; 2072 bool valid = m_upt.decrement(r_multi_ack_upt_index.read(), count); 2073 2074 2075 if(not valid) 2076 { 2077 std::cout << "VCI_MEM_CACHE ERROR " << name() 2078 << " MULTI_ACK_UPT_LOCK state" << std::endl 2079 << "unsuccessful access to decrement the UPT" << std::endl; 2080 exit(0); 2081 } 2082 2083 if(count == 0) 2084 { 2085 r_multi_ack_fsm = MULTI_ACK_UPT_CLEAR; 2086 } 2087 else 2088 { 2089 r_multi_ack_fsm = MULTI_ACK_IDLE; 2090 } 1981 if (m_debug) 1982 { 1983 std::cout << " <MEMC " << name() 1984 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 1985 << " entry = " << r_multi_ack_upt_index.read() 1986 << " / rsp_count = " << std::dec << count << std::endl; 1987 } 1988 #endif 1989 break; 1990 } 1991 1992 ///////////////////////// 1993 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 1994 { 1995 if (r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 1996 { 1997 std::cout << "VCI_MEM_CACHE ERROR " << name() 1998 << " 
MULTI_ACK_UPT_CLEAR state" 1999 << " bad UPT allocation" << std::endl; 2000 exit(0); 2001 } 2002 2003 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 2004 r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read()); 2005 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 2006 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 2007 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 2008 2009 // clear the UPT entry 2010 m_upt.clear(r_multi_ack_upt_index.read()); 2011 2012 if (need_rsp) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 2013 else r_multi_ack_fsm = MULTI_ACK_IDLE; 2091 2014 2092 2015 #if DEBUG_MEMC_MULTI_ACK 2093 if(m_debug) 2094 std::cout << " <MEMC " << name() 2095 << " MULTI_ACK_UPT_LOCK> Decrement the responses counter for UPT:" 2096 << " entry = " << r_multi_ack_upt_index.read() 2097 << " / rsp_count = " << std::dec << count << std::endl; 2098 #endif 2099 break; 2100 } 2101 2102 ///////////////////////// 2103 case MULTI_ACK_UPT_CLEAR: // Clear UPT entry / Test if rsp or ack required 2104 { 2105 if(r_alloc_upt_fsm.read() != ALLOC_UPT_MULTI_ACK) 2106 { 2107 std::cout << "VCI_MEM_CACHE ERROR " << name() 2108 << " MULTI_ACK_UPT_CLEAR state" 2109 << " bad UPT allocation" << std::endl; 2110 exit(0); 2111 } 2112 2113 r_multi_ack_srcid = m_upt.srcid(r_multi_ack_upt_index.read()); 2114 r_multi_ack_trdid = m_upt.trdid(r_multi_ack_upt_index.read()); 2115 r_multi_ack_pktid = m_upt.pktid(r_multi_ack_upt_index.read()); 2116 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 2117 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 2118 2119 // clear the UPT entry 2120 m_upt.clear(r_multi_ack_upt_index.read()); 2121 2122 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 2123 else r_multi_ack_fsm = MULTI_ACK_IDLE; 2016 if (m_debug) 2017 { 2018 std::cout << " <MEMC " << name() 2019 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 2020 << std::dec << r_multi_ack_upt_index.read() << std::endl; 2021 } 2022 #endif 2023 break; 2024 } 2025 ///////////////////////// 2026 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 2027 // Wait if pending request 2028 { 2029 if (r_multi_ack_to_tgt_rsp_req.read()) break; 2030 2031 r_multi_ack_to_tgt_rsp_req = true; 2032 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 2033 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 2034 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 2035 r_multi_ack_fsm = MULTI_ACK_IDLE; 2124 2036 2125 2037 #if DEBUG_MEMC_MULTI_ACK 2126 if(m_debug) 2127 std::cout << " <MEMC " << name() 2128 << " MULTI_ACK_UPT_CLEAR> Clear UPT entry " 2129 << std::dec << r_multi_ack_upt_index.read() << std::endl; 2130 #endif 2131 break; 2132 } 2133 ///////////////////////// 2134 case MULTI_ACK_WRITE_RSP: // Post a response request to TGT_RSP FSM 2135 // Wait if pending request 2136 { 2137 if ( r_multi_ack_to_tgt_rsp_req.read() ) break; 2138 2139 r_multi_ack_to_tgt_rsp_req = true; 2140 r_multi_ack_to_tgt_rsp_srcid = r_multi_ack_srcid.read(); 2141 r_multi_ack_to_tgt_rsp_trdid = r_multi_ack_trdid.read(); 2142 r_multi_ack_to_tgt_rsp_pktid = r_multi_ack_pktid.read(); 2143 r_multi_ack_fsm = MULTI_ACK_IDLE; 2144 2145 #if DEBUG_MEMC_MULTI_ACK 2146 if(m_debug) 2147 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 2148 << " Request TGT_RSP FSM to send a response to srcid " 2149 << std::hex << r_multi_ack_srcid.read() << std::endl; 2150 #endif 2151 break; 2152 } 2038 if (m_debug) 2039 { 2040 std::cout << " <MEMC " << name() << " MULTI_ACK_WRITE_RSP>" 
2041 << " Request TGT_RSP FSM to send a response to srcid " 2042 << std::hex << r_multi_ack_srcid.read() << std::endl; 2043 } 2044 #endif 2045 break; 2046 } 2153 2047 } // end switch r_multi_ack_fsm 2154 2048 … … 2162 2056 // An INVAL or SYNC configuration operation is defined by the following registers: 2163 2057 // - bool r_config_cmd : INVAL / SYNC / NOP 2164 2165 2058 // - uint64_t r_config_address : buffer base address 2166 2059 // - uint32_t r_config_cmd_lines : number of lines to be handled 2167 2060 // - uint32_t r_config_rsp_lines : number of lines not completed 2168 2169 2061 // 2170 2062 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 2171 //2172 2063 // all cache lines covered by the buffer. The various lines of a given buffer 2173 2064 // can be pipelined: the CONFIG FSM does not wait the response for line (n) to send … … 2187 2078 // a response is requested to TGT_RSP FSM. 2188 2079 // If there is copies, a multi-inval, or a broadcast-inval coherence transaction 2189 //2190 2080 // is launched and registered in UPT. The multi-inval transaction completion 2191 2081 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. … … 2215 2105 //////////////////////////////////////////////////////////////////////////////////// 2216 2106 2217 switch ( r_config_fsm.read())2107 switch (r_config_fsm.read()) 2218 2108 { 2219 2109 ///////////////// 2220 2110 case CONFIG_IDLE: // waiting a config request 2221 2222 if ( r_config_cmd.read() != MEMC_CMD_NOP)2223 2224 r_config_fsm= CONFIG_LOOP;2111 { 2112 if (r_config_cmd.read() != MEMC_CMD_NOP) 2113 { 2114 r_config_fsm = CONFIG_LOOP; 2225 2115 2226 2116 #if DEBUG_MEMC_CONFIG 2227 if(m_debug) 2228 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 2229 << " address = " << std::hex << r_config_address.read() 2230 << " / nlines = " << std::dec << r_config_cmd_lines.read() 2231 << " / type = " << r_config_cmd.read() << std::endl; 2232 #endif 2233 } 2234 break; 2235 } 2236 ///////////////// 2237 case CONFIG_LOOP: // test last line to be handled 2238 { 2239 if ( r_config_cmd_lines.read() == 0 ) 2240 { 2241 r_config_cmd = MEMC_CMD_NOP; 2242 r_config_fsm = CONFIG_WAIT; 2117 if (m_debug) 2118 { 2119 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 2120 << " / address = " << std::hex << r_config_address.read() 2121 << " / lines = " << std::dec << r_config_cmd_lines.read() 2122 << " / type = " << r_config_cmd.read() << std::endl; 2123 } 2124 #endif 2125 } 2126 break; 2127 } 2128 ///////////////// 2129 case CONFIG_LOOP: // test if last line to be handled 2130 { 2131 if (r_config_cmd_lines.read() == 0) 2132 { 2133 r_config_cmd = MEMC_CMD_NOP; 2134 r_config_fsm = CONFIG_WAIT; 2135 } 2136 else 2137 { 2138 r_config_fsm = CONFIG_DIR_REQ; 2139 } 2140 2141 #if DEBUG_MEMC_CONFIG 2142 if (m_debug) 2143 { 2144 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 2145 << " / address = " << std::hex << r_config_address.read() 2146 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 2147 << " / command = " << r_config_cmd.read() << std::endl; 2148 } 2149 #endif 2150 break; 2151 } 2152 ///////////////// 2153 case CONFIG_WAIT: // wait completion (last response) 2154 { 2155 if (r_config_rsp_lines.read() == 0) // last response received 2156 { 2157 r_config_fsm = CONFIG_RSP; 2158 } 2159 2160 #if DEBUG_MEMC_CONFIG 2161 if (m_debug) 2162 { 2163 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 2164 << " / lines to do = " << std::dec << 
r_config_rsp_lines.read() << std::endl; 2165 } 2166 #endif 2167 break; 2168 } 2169 //////////////// 2170 case CONFIG_RSP: // request TGT_RSP FSM to return response 2171 { 2172 if (not r_config_to_tgt_rsp_req.read()) 2173 { 2174 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 2175 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 2176 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 2177 r_config_to_tgt_rsp_error = false; 2178 r_config_to_tgt_rsp_req = true; 2179 r_config_fsm = CONFIG_IDLE; 2180 2181 #if DEBUG_MEMC_CONFIG 2182 if (m_debug) 2183 { 2184 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to send response:" 2185 << " error = " << r_config_to_tgt_rsp_error.read() 2186 << " / rsrcid = " << std::hex << r_config_srcid.read() 2187 << " / rtrdid = " << std::hex << r_config_trdid.read() 2188 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 2189 } 2190 #endif 2191 } 2192 break; 2193 } 2194 //////////////////// 2195 case CONFIG_DIR_REQ: // Request directory lock 2196 { 2197 if (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) 2198 { 2199 r_config_fsm = CONFIG_DIR_ACCESS; 2200 } 2201 2202 #if DEBUG_MEMC_CONFIG 2203 if (m_debug) 2204 { 2205 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 2206 << " Request DIR access" << std::endl; 2207 } 2208 #endif 2209 break; 2210 } 2211 /////////////////////// 2212 case CONFIG_DIR_ACCESS: // Access directory and decode config command 2213 { 2214 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2215 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 2216 2217 size_t way = 0; 2218 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 2219 2220 r_config_dir_way = way; 2221 r_config_dir_copy_inst = entry.owner.inst; 2222 r_config_dir_copy_srcid = entry.owner.srcid; 2223 r_config_dir_is_cnt = entry.is_cnt; 2224 r_config_dir_lock = entry.lock; 2225 r_config_dir_count = entry.count; 2226 r_config_dir_ptr = entry.ptr; 2227 2228 if (entry.valid and // hit & inval command 2229 (r_config_cmd.read() == MEMC_CMD_INVAL)) 2230 { 2231 r_config_fsm = CONFIG_IVT_LOCK; 2232 } 2233 else if (entry.valid and // hit & sync command 2234 entry.dirty and 2235 (r_config_cmd.read() == MEMC_CMD_SYNC)) 2236 { 2237 r_config_fsm = CONFIG_TRT_LOCK; 2238 } 2239 else // miss : return to LOOP 2240 { 2241 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2242 r_config_address = r_config_address.read() + (m_words << 2); 2243 r_config_fsm = CONFIG_LOOP; 2244 } 2245 2246 #if DEBUG_MEMC_CONFIG 2247 if (m_debug) 2248 { 2249 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 2250 << " address = " << std::hex << r_config_address.read() 2251 << " / hit = " << std::dec << entry.valid 2252 << " / dirty = " << entry.dirty 2253 << " / count = " << entry.count 2254 << " / is_cnt = " << entry.is_cnt << std::endl; 2255 } 2256 #endif 2257 break; 2258 } 2259 ///////////////////// 2260 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 2261 // to a dirty cache line 2262 // keep DIR lock, and try to get TRT lock 2263 // return to LOOP state if TRT full 2264 // reset dirty bit in DIR and register a PUT 2265 // transaction in TRT if not full. 
2266 { 2267 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2268 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 2269 2270 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) 2271 { 2272 size_t index = 0; 2273 bool wok = not m_trt.full(index); 2274 2275 if (not wok) 2276 { 2277 r_config_fsm = CONFIG_LOOP; 2243 2278 } 2244 2279 else 2245 2280 { 2246 r_config_fsm = CONFIG_DIR_REQ; 2281 size_t way = r_config_dir_way.read(); 2282 size_t set = m_y[r_config_address.read()]; 2283 2284 // reset dirty bit in DIR 2285 DirectoryEntry entry; 2286 entry.valid = true; 2287 entry.dirty = false; 2288 entry.tag = m_z[r_config_address.read()]; 2289 entry.is_cnt = r_config_dir_is_cnt.read(); 2290 entry.lock = r_config_dir_lock.read(); 2291 entry.ptr = r_config_dir_ptr.read(); 2292 entry.count = r_config_dir_count.read(); 2293 entry.owner.inst = r_config_dir_copy_inst.read(); 2294 entry.owner.srcid = r_config_dir_copy_srcid.read(); 2295 m_cache_directory.write(set, way, entry); 2296 2297 r_config_trt_index = index; 2298 r_config_fsm = CONFIG_TRT_SET; 2247 2299 } 2248 2300 2249 2301 #if DEBUG_MEMC_CONFIG 2250 if(m_debug) 2251 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 2252 << " address = " << std::hex << r_config_address.read() 2253 << " / nlines = " << std::dec << r_config_cmd_lines.read() 2254 << " / command = " << r_config_cmd.read() << std::endl; 2255 #endif 2256 break; 2257 } 2258 ///////////////// 2259 case CONFIG_WAIT: // wait completion (last response) 2260 { 2261 if ( r_config_rsp_lines.read() == 0 ) // last response received 2262 { 2263 r_config_fsm = CONFIG_RSP; 2264 } 2302 if (m_debug) 2303 { 2304 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 2305 << " wok = " << std::dec << wok 2306 << " index = " << index << std::endl; 2307 } 2308 #endif 2309 } 2310 break; 2311 } 2312 //////////////////// 2313 case CONFIG_TRT_SET: // read data in cache 2314 // and post a PUT request in TRT 2315 { 2316 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2317 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 2318 2319 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 2320 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 2321 2322 // read data into cache 2323 size_t way = r_config_dir_way.read(); 2324 size_t set = m_y[r_config_address.read()]; 2325 std::vector<data_t> data_vector; 2326 data_vector.clear(); 2327 for (size_t word = 0; word < m_words; word++) 2328 { 2329 uint32_t data = m_cache_data.read(way, set, word); 2330 data_vector.push_back(data); 2331 } 2332 2333 // post the PUT request in TRT 2334 m_trt.set(r_config_trt_index.read(), 2335 false, // PUT transaction 2336 m_nline[r_config_address.read()], // line index 2337 0, // srcid: unused 2338 0, // trdid: unused 2339 0, // pktid: unused 2340 false, // not proc_read 2341 0, // read_length: unused 2342 0, // word_index: unused 2343 std::vector<be_t>(m_words, 0xF), // byte-enable: unused 2344 data_vector, // data to be written 2345 0, // ll_key: unused 2346 true); // requested by config FSM 2347 config_rsp_lines_incr = true; 2348 r_config_fsm = CONFIG_PUT_REQ; 2265 2349 2266 2350 #if DEBUG_MEMC_CONFIG 2267 if(m_debug) 2268 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 2269 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 2270 #endif 2271 break; 2272 } 2273 //////////////// 2274 case CONFIG_RSP: // request TGT_RSP FSM to return response 2275 { 2276 if ( not r_config_to_tgt_rsp_req.read() ) 2277 { 2278 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 
2279 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 2280 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 2281 r_config_to_tgt_rsp_error = false; 2282 r_config_to_tgt_rsp_req = true; 2283 r_config_fsm = CONFIG_IDLE; 2351 if (m_debug) 2352 { 2353 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 2354 << " address = " << std::hex << r_config_address.read() 2355 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 2356 } 2357 #endif 2358 break; 2359 } 2360 //////////////////// 2361 case CONFIG_PUT_REQ: // post PUT request to IXR_CMD_FSM 2362 { 2363 if (not r_config_to_ixr_cmd_req.read()) 2364 { 2365 r_config_to_ixr_cmd_req = true; 2366 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 2367 2368 // prepare next iteration 2369 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2370 r_config_address = r_config_address.read() + (m_words << 2); 2371 r_config_fsm = CONFIG_LOOP; 2284 2372 2285 2373 #if DEBUG_MEMC_CONFIG 2286 if(m_debug) 2287 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 2288 << " error = " << r_config_to_tgt_rsp_error.read() 2289 << " / rsrcid = " << std::hex << r_config_srcid.read() 2290 << " / rtrdid = " << std::hex << r_config_trdid.read() 2291 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 2292 #endif 2293 } 2294 break; 2295 2296 } 2297 2298 //////////////////// 2299 case CONFIG_DIR_REQ: // Request directory lock 2300 { 2301 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG ) 2302 { 2303 r_config_fsm = CONFIG_DIR_ACCESS; 2304 } 2374 if (m_debug) 2375 { 2376 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> post PUT request to IXR_CMD_FSM" 2377 << " / address = " << std::hex << r_config_address.read() << std::endl; 2378 } 2379 #endif 2380 } 2381 break; 2382 } 2383 ///////////////////// 2384 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 2385 // Keep DIR lock and Try to get IVT lock. 2386 // Return to LOOP state if IVT full. 2387 // Register inval in IVT, and invalidate the 2388 // directory if IVT not full. 
2389 { 2390 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2391 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 2392 2393 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG) 2394 { 2395 size_t set = m_y[(addr_t) (r_config_address.read())]; 2396 size_t way = r_config_dir_way.read(); 2397 2398 if (r_config_dir_count.read() == 0) // inval DIR and return to LOOP 2399 { 2400 m_cache_directory.inval(way, set); 2401 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2402 r_config_address = r_config_address.read() + (m_words << 2); 2403 r_config_fsm = CONFIG_LOOP; 2305 2404 2306 2405 #if DEBUG_MEMC_CONFIG 2307 if(m_debug) 2308 std::cout << " <MEMC " << name() << " CONFIG_DIR_REQ>" 2309 << " Request DIR access" << std::endl; 2310 #endif 2311 break; 2312 } 2313 /////////////////////// 2314 case CONFIG_DIR_ACCESS: // Access directory and decode config command 2315 { 2316 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2317 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 2318 2319 size_t way = 0; 2320 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); 2321 2322 r_config_dir_way = way; 2323 r_config_dir_copy_inst = entry.owner.inst; 2324 r_config_dir_copy_srcid = entry.owner.srcid; 2325 r_config_dir_is_cnt = entry.is_cnt; 2326 r_config_dir_count = entry.count; 2327 r_config_dir_lock = entry.lock; 2328 r_config_dir_ptr = entry.ptr; 2329 2330 if (entry.valid and // hit & inval command 2331 (r_config_cmd.read() == MEMC_CMD_INVAL)) 2332 { 2333 r_config_fsm = CONFIG_IVT_LOCK; 2334 } 2335 else if ( entry.valid and // hit & sync command 2336 entry.dirty and 2337 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 2338 { 2339 r_config_fsm = CONFIG_TRT_LOCK; 2340 } 2341 else // return to LOOP 2342 { 2343 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2344 r_config_address = r_config_address.read() + (m_words<<2); 2345 r_config_fsm = CONFIG_LOOP; 2346 } 2406 if (m_debug) 2407 { 2408 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2409 << " No copies in L1 : inval DIR entry" << std::endl; 2410 } 2411 #endif 2412 } 2413 else // try to register inval in IVT 2414 { 2415 bool wok = false; 2416 size_t index = 0; 2417 bool broadcast = r_config_dir_is_cnt.read(); 2418 size_t srcid = r_config_srcid.read(); 2419 size_t trdid = r_config_trdid.read(); 2420 size_t pktid = r_config_pktid.read(); 2421 addr_t nline = m_nline[(addr_t) (r_config_address.read())]; 2422 size_t nb_copies = r_config_dir_count.read(); 2423 2424 wok = m_ivt.set(false, // it's an inval transaction 2425 broadcast, 2426 false, // no response required 2427 true, // acknowledge required 2428 srcid, 2429 trdid, 2430 pktid, 2431 nline, 2432 nb_copies, 2433 index); 2434 2435 if (wok) // IVT success => inval DIR slot 2436 { 2437 m_cache_directory.inval(way, set); 2438 r_config_ivt_index = index; 2439 config_rsp_lines_incr = true; 2440 if (broadcast) 2441 { 2442 r_config_fsm = CONFIG_BC_SEND; 2443 } 2444 else 2445 { 2446 r_config_fsm = CONFIG_INVAL_SEND; 2447 } 2347 2448 2348 2449 #if DEBUG_MEMC_CONFIG 2349 if(m_debug) 2350 std::cout << " <MEMC " << name() << " CONFIG_DIR_ACCESS> Accessing directory: " 2351 << " address = " << std::hex << r_config_address.read() 2352 << " / hit = " << std::dec << entry.valid 2353 << " / dirty = " << entry.dirty 2354 << " / count = " << entry.count 2355 << " / is_cnt = " << entry.is_cnt << std::endl; 2356 #endif 2357 break; 2358 } 2359 ///////////////////// 2360 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 2361 // to a dirty cache line 2362 
// keep DIR lock, and try to get TRT lock 2363 // return to LOOP state if TRT full 2364 // reset dirty bit in DIR and register a PUT 2365 // trabsaction in TRT if not full. 2366 { 2367 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2368 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 2369 2370 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 2371 { 2372 size_t index = 0; 2373 bool wok = not m_trt.full(index); 2374 2375 if ( not wok ) 2450 if (m_debug) 2451 { 2452 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2453 << " Inval DIR entry and register inval in IVT" 2454 << " / index = " << std::dec << index 2455 << " / broadcast = " << broadcast << std::endl; 2456 } 2457 #endif 2458 } 2459 else // IVT full => release both DIR and IVT locks 2376 2460 { 2377 2461 r_config_fsm = CONFIG_LOOP; 2462 2463 #if DEBUG_MEMC_CONFIG 2464 if (m_debug) 2465 { 2466 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2467 << " IVT full : release DIR & IVT locks and retry" << std::endl; 2468 } 2469 #endif 2378 2470 } 2379 else 2380 { 2381 size_t way = r_config_dir_way.read(); 2382 size_t set = m_y[r_config_address.read()]; 2383 2384 // reset dirty bit in DIR 2385 DirectoryEntry entry; 2386 entry.valid = true; 2387 entry.dirty = false; 2388 entry.tag = m_z[r_config_address.read()]; 2389 entry.is_cnt = r_config_dir_is_cnt.read(); 2390 entry.lock = r_config_dir_lock.read(); 2391 entry.ptr = r_config_dir_ptr.read(); 2392 entry.count = r_config_dir_count.read(); 2393 entry.owner.inst = r_config_dir_copy_inst.read(); 2394 entry.owner.srcid = r_config_dir_copy_srcid.read(); 2395 m_cache_directory.write( set, way, entry ); 2396 2397 r_config_trt_index = index; 2398 r_config_fsm = CONFIG_TRT_SET; 2399 } 2471 } 2472 } 2473 break; 2474 } 2475 //////////////////// 2476 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 2477 { 2478 if (not r_config_to_cc_send_multi_req.read() and 2479 not r_config_to_cc_send_brdcast_req.read()) 2480 { 2481 // post bc inval request 2482 r_config_to_cc_send_multi_req = false; 2483 r_config_to_cc_send_brdcast_req = true; 2484 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2485 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2486 2487 // prepare next iteration 2488 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2489 r_config_address = r_config_address.read() + (m_words << 2); 2490 r_config_fsm = CONFIG_LOOP; 2400 2491 2401 2492 #if DEBUG_MEMC_CONFIG 2402 if(m_debug) 2403 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 2404 << " wok = " << std::dec << wok 2405 << " index = " << index << std::endl; 2406 #endif 2407 } 2408 break; 2409 } 2410 //////////////////// 2411 case CONFIG_TRT_SET: // read data in cache 2412 // and post a PUT request in TRT 2413 { 2414 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2415 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 2416 2417 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 2418 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 2419 2420 // read data into cache 2421 size_t way = r_config_dir_way.read(); 2422 size_t set = m_y[r_config_address.read()]; 2423 std::vector<data_t> data_vector; 2424 data_vector.clear(); 2425 for(size_t word=0; word<m_words; word++) 2426 { 2427 uint32_t data = m_cache_data.read( way, set, word ); 2428 data_vector.push_back( data ); 2429 } 2430 2431 // post the PUT request in TRT 2432 m_trt.set( r_config_trt_index.read(), 2433 false, // PUT transaction 2434 
m_nline[r_config_address.read()], // line index 2435 0, // srcid: unused 2436 0, // trdid: unused 2437 0, // pktid: unused 2438 false, // not proc_read 2439 0, // read_length: unused 2440 0, // word_index: unused 2441 std::vector<be_t>(m_words,0xF), // byte-enable: unused 2442 data_vector, // data to be written 2443 0, // ll_key: unused 2444 true ); // requested by config FSM 2445 config_rsp_lines_incr = true; 2446 r_config_fsm = CONFIG_PUT_REQ; 2493 if (m_debug) 2494 { 2495 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 2496 << " Post a broadcast inval request to CC_SEND FSM" 2497 << " / address = " << r_config_address.read() <<std::endl; 2498 } 2499 #endif 2500 } 2501 break; 2502 } 2503 /////////////////////// 2504 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 2505 { 2506 if (not r_config_to_cc_send_multi_req.read() and 2507 not r_config_to_cc_send_brdcast_req.read()) 2508 { 2509 // post multi inval request 2510 r_config_to_cc_send_multi_req = true; 2511 r_config_to_cc_send_brdcast_req = false; 2512 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2513 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2514 2515 // post data into FIFO 2516 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2517 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2518 config_to_cc_send_fifo_put = true; 2519 2520 if (r_config_dir_count.read() == 1) // one copy 2521 { 2522 // prepare next iteration 2523 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2524 r_config_address = r_config_address.read() + (m_words << 2); 2525 r_config_fsm = CONFIG_LOOP; 2526 } 2527 else // several copies 2528 { 2529 r_config_fsm = CONFIG_HEAP_REQ; 2530 } 2447 2531 2448 2532 #if DEBUG_MEMC_CONFIG 2449 if(m_debug) 2450 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 2451 << " address = " << std::hex << r_config_address.read() 2452 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 2453 #endif 2454 break; 2455 } 2456 //////////////////// 2457 case CONFIG_PUT_REQ: // post PUT request to IXR_CMD_FSM 2458 { 2459 if ( not r_config_to_ixr_cmd_req.read() ) 2460 { 2461 r_config_to_ixr_cmd_req = true; 2462 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 2463 2464 // prepare next iteration 2465 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2466 r_config_address = r_config_address.read() + (m_words<<2); 2467 r_config_fsm = CONFIG_LOOP; 2468 2469 #if DEBUG_MEMC_CONFIG 2470 if(m_debug) 2471 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> post PUT request to IXR_CMD_FSM" 2472 << " / address = " << std::hex << r_config_address.read() << std::endl; 2473 #endif 2474 } 2475 break; 2476 } 2477 ///////////////////// 2478 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 2479 // Keep DIR lock and Try to get IVT lock. 2480 // Return to LOOP state if IVT full. 2481 // Register inval in IVT, and invalidate the 2482 // directory if IVT not full. 
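// --- Editor's sketch, not part of the changeset -----------------------------
// Summary of the decoding done in CONFIG_DIR_ACCESS above. The helper and the
// cfg_action_t enum are hypothetical; MEMC_CMD_INVAL / MEMC_CMD_SYNC are the
// command codes already used by this FSM.
enum cfg_action_t { CFG_INVAL_COPIES, CFG_PUT_TO_XRAM, CFG_NEXT_LINE };

static cfg_action_t config_decode(bool valid, bool dirty, int cmd)
{
    if (valid and (cmd == MEMC_CMD_INVAL))          return CFG_INVAL_COPIES; // CONFIG_IVT_LOCK path
    if (valid and dirty and (cmd == MEMC_CMD_SYNC)) return CFG_PUT_TO_XRAM;  // CONFIG_TRT_LOCK path
    return CFG_NEXT_LINE;                                                    // miss or clean line: back to CONFIG_LOOP
}
// -----------------------------------------------------------------------------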
2483 { 2484 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 2485 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 2486 2487 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 2488 { 2489 size_t set = m_y[(addr_t)(r_config_address.read())]; 2490 size_t way = r_config_dir_way.read(); 2491 2492 if ( r_config_dir_count.read() == 0 ) // inval DIR and return to LOOP 2493 { 2494 m_cache_directory.inval( way, set ); 2495 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2496 r_config_address = r_config_address.read() + (m_words<<2); 2497 r_config_fsm = CONFIG_LOOP; 2498 2499 #if DEBUG_MEMC_CONFIG 2500 if(m_debug) 2501 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2502 << " No copies in L1 : inval DIR entry" << std::endl; 2503 #endif 2504 } 2505 else // try to register inval in IVT 2506 { 2507 bool wok = false; 2508 size_t index = 0; 2509 bool broadcast = r_config_dir_is_cnt.read(); 2510 size_t srcid = r_config_srcid.read(); 2511 size_t trdid = r_config_trdid.read(); 2512 size_t pktid = r_config_pktid.read(); 2513 addr_t nline = m_nline[(addr_t)(r_config_address.read())]; 2514 size_t nb_copies = r_config_dir_count.read(); 2515 2516 wok = m_ivt.set(false, // it's an inval transaction 2517 broadcast, 2518 false, // no response required 2519 true, // acknowledge required 2520 srcid, 2521 trdid, 2522 pktid, 2523 nline, 2524 nb_copies, 2525 index); 2526 2527 if ( wok ) // IVT success => inval DIR slot 2528 { 2529 m_cache_directory.inval( way, set ); 2530 r_config_ivt_index = index; 2531 config_rsp_lines_incr = true; 2532 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 2533 else r_config_fsm = CONFIG_INVAL_SEND; 2534 2535 #if DEBUG_MEMC_CONFIG 2536 if(m_debug) 2537 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2538 << " Inval DIR entry and register inval in IVT" 2539 << " : index = " << std::dec << index 2540 << " / broadcast = " << broadcast << std::endl; 2541 #endif 2542 } 2543 else // IVT full => release both DIR and IVT locks 2544 { 2545 r_config_fsm = CONFIG_LOOP; 2546 2547 #if DEBUG_MEMC_CONFIG 2548 if(m_debug) 2549 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 2550 << " IVT full : release DIR & IVT locks and retry" << std::endl; 2551 #endif 2552 } 2553 } 2554 } 2555 break; 2556 } 2557 //////////////////// 2558 case CONFIG_BC_SEND: // Post a broadcast inval request to CC_SEND FSM 2559 { 2560 if( not r_config_to_cc_send_multi_req.read() and 2561 not r_config_to_cc_send_brdcast_req.read() ) 2562 { 2563 // post bc inval request 2564 r_config_to_cc_send_multi_req = false; 2565 r_config_to_cc_send_brdcast_req = true; 2566 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 2567 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2568 2569 // prepare next iteration 2570 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2571 r_config_address = r_config_address.read() + (m_words<<2); 2572 r_config_fsm = CONFIG_LOOP; 2573 2574 #if DEBUG_MEMC_CONFIG 2575 if(m_debug) 2576 std::cout << " <MEMC " << name() << " CONFIG_BC_SEND>" 2577 << " Post a broadcast inval request to CC_SEND FSM" 2578 << " / address = " << r_config_address.read() <<std::endl; 2579 #endif 2580 } 2581 break; 2582 } 2583 /////////////////////// 2584 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 2585 { 2586 if( not r_config_to_cc_send_multi_req.read() and 2587 not r_config_to_cc_send_brdcast_req.read() ) 2588 { 2589 r_config_to_cc_send_multi_req = true; 2590 r_config_to_cc_send_brdcast_req = false; 2591 r_config_to_cc_send_trdid = 
r_config_ivt_index.read(); 2592 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 2593 2594 // post data into FIFO 2595 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 2596 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 2597 config_to_cc_send_fifo_put = true; 2598 2599 if ( r_config_dir_count.read() == 1 ) // one copy 2600 { 2601 // prepare next iteration 2602 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2603 r_config_address = r_config_address.read() + (m_words<<2); 2604 r_config_fsm = CONFIG_LOOP; 2605 } 2606 else // several copies 2607 { 2608 r_config_fsm = CONFIG_HEAP_REQ; 2609 } 2610 2611 #if DEBUG_MEMC_CONFIG 2612 if(m_debug) 2613 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2614 << " Post multi inval request to CC_SEND FSM" 2615 << " / address = " << std::hex << r_config_address.read() 2616 << " / copy = " << r_config_dir_copy_srcid.read() 2617 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2618 #endif 2619 } 2620 break; 2621 } 2622 ///////////////////// 2623 case CONFIG_HEAP_REQ: // Try to get access to Heap 2624 { 2625 if ( r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG ) 2626 { 2627 r_config_fsm = CONFIG_HEAP_SCAN; 2628 r_config_heap_next = r_config_dir_ptr.read(); 2629 } 2630 2631 #if DEBUG_MEMC_CONFIG 2632 if(m_debug) 2633 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2634 << " Requesting HEAP lock" << std::endl; 2635 #endif 2636 break; 2637 } 2638 ////////////////////// 2639 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2640 { 2641 HeapEntry entry = m_heap.read( r_config_heap_next.read() ); 2642 bool last_copy = (entry.next == r_config_heap_next.read()); 2643 2644 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2645 config_to_cc_send_fifo_inst = entry.owner.inst; 2646 // config_to_cc_send_fifo_last = last_copy; 2647 config_to_cc_send_fifo_put = true; 2648 2649 if ( m_config_to_cc_send_inst_fifo.wok() ) // inval request accepted 2650 { 2651 r_config_heap_next = entry.next; 2652 if ( last_copy ) r_config_fsm = CONFIG_HEAP_LAST; 2653 2654 // <Activity counters> 2655 m_cpt_heap_slot_available++; 2656 // </Activity counters> 2657 } 2658 2659 #if DEBUG_MEMC_CONFIG 2660 if(m_debug) 2661 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2533 if (m_debug) 2534 { 2535 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 2662 2536 << " Post multi inval request to CC_SEND FSM" 2663 2537 << " / address = " << std::hex << r_config_address.read() 2664 << " / copy = " << entry.owner.srcid 2665 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2666 #endif 2667 break; 2668 } 2669 ////////////////////// 2670 case CONFIG_HEAP_LAST: // HEAP housekeeping 2671 { 2672 size_t free_pointer = m_heap.next_free_ptr(); 2673 HeapEntry last_entry; 2674 last_entry.owner.srcid = 0; 2675 last_entry.owner.inst = false; 2676 2677 if ( m_heap.is_full() ) 2678 { 2679 last_entry.next = r_config_dir_ptr.read(); 2680 m_heap.unset_full(); 2681 } 2682 else 2683 { 2684 last_entry.next = free_pointer; 2685 } 2686 2687 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 2688 m_heap.write( r_config_heap_next.read(), last_entry ); 2689 2690 // prepare next iteration 2691 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2692 r_config_address = r_config_address.read() + (m_words<<2); 2693 r_config_fsm = CONFIG_LOOP; 2538 << " / copy = " << r_config_dir_copy_srcid.read() 2539 << " / inst = " << std::dec << r_config_dir_copy_inst.read() << std::endl; 2540 } 2541 #endif 2542 } 
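// --- Editor's sketch, not part of the changeset -----------------------------
// How a CONFIG inval is propagated, as decided in CONFIG_IVT_LOCK above.
// The helper is hypothetical; is_cnt and count are the DIR entry fields saved
// in r_config_dir_is_cnt / r_config_dir_count.
static const char *config_inval_path(bool is_cnt, size_t count)
{
    if (count == 0) return "no L1 copy: invalidate the DIR slot only";
    if (is_cnt)     return "counter mode: broadcast inval (CONFIG_BC_SEND)";
    if (count == 1) return "one copy, held in the DIR owner field (CONFIG_INVAL_SEND)";
    return "several copies: owner field + heap scan (CONFIG_HEAP_REQ/SCAN)";
}
// -----------------------------------------------------------------------------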
2543 break; 2544 } 2545 ///////////////////// 2546 case CONFIG_HEAP_REQ: // Try to get access to Heap 2547 { 2548 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_CONFIG) 2549 { 2550 r_config_fsm = CONFIG_HEAP_SCAN; 2551 r_config_heap_next = r_config_dir_ptr.read(); 2552 } 2694 2553 2695 2554 #if DEBUG_MEMC_CONFIG 2696 if(m_debug) 2697 std::cout << " <MEMC " << name() << " CONFIG_HEAP_LAST>" 2698 << " Heap housekeeping" << std::endl; 2699 #endif 2700 break; 2701 } 2555 if (m_debug) 2556 { 2557 std::cout << " <MEMC " << name() << " CONFIG_HEAP_REQ>" 2558 << " Requesting HEAP lock" << std::endl; 2559 } 2560 #endif 2561 break; 2562 } 2563 ////////////////////// 2564 case CONFIG_HEAP_SCAN: // scan HEAP and send inval to CC_SEND FSM 2565 { 2566 HeapEntry entry = m_heap.read(r_config_heap_next.read()); 2567 bool last_copy = (entry.next == r_config_heap_next.read()); 2568 2569 config_to_cc_send_fifo_srcid = entry.owner.srcid; 2570 config_to_cc_send_fifo_inst = entry.owner.inst; 2571 config_to_cc_send_fifo_put = true; 2572 2573 if (m_config_to_cc_send_inst_fifo.wok()) // inval request accepted 2574 { 2575 r_config_heap_next = entry.next; 2576 if (last_copy) r_config_fsm = CONFIG_HEAP_LAST; 2577 2578 // <Activity counters> 2579 m_cpt_heap_slot_available++; 2580 // </Activity counters> 2581 } 2582 2583 #if DEBUG_MEMC_CONFIG 2584 if (m_debug) 2585 { 2586 std::cout << " <MEMC " << name() << " CONFIG_HEAP_SCAN>" 2587 << " Post multi inval request to CC_SEND FSM" 2588 << " / address = " << std::hex << r_config_address.read() 2589 << " / copy = " << entry.owner.srcid 2590 << " / inst = " << std::dec << entry.owner.inst << std::endl; 2591 } 2592 #endif 2593 break; 2594 } 2595 ////////////////////// 2596 case CONFIG_HEAP_LAST: // HEAP housekeeping 2597 { 2598 size_t free_pointer = m_heap.next_free_ptr(); 2599 HeapEntry last_entry; 2600 last_entry.owner.srcid = 0; 2601 last_entry.owner.inst = false; 2602 2603 if (m_heap.is_full()) 2604 { 2605 last_entry.next = r_config_dir_ptr.read(); 2606 m_heap.unset_full(); 2607 } 2608 else 2609 { 2610 last_entry.next = free_pointer; 2611 } 2612 2613 m_heap.write_free_ptr(r_config_dir_ptr.read()); 2614 m_heap.write(r_config_heap_next.read(), last_entry); 2615 2616 // prepare next iteration 2617 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 2618 r_config_address = r_config_address.read() + (m_words << 2); 2619 r_config_fsm = CONFIG_LOOP; 2620 2621 #if DEBUG_MEMC_CONFIG 2622 if (m_debug) 2623 { 2624 std::cout << " <MEMC " << name() << " CONFIG_HEAP_LAST>" 2625 << " Heap housekeeping" << std::endl; 2626 } 2627 #endif 2628 break; 2629 } 2702 2630 } // end switch r_config_fsm 2631 2632 2703 2633 2704 2634 //////////////////////////////////////////////////////////////////////////////////// … … 2729 2659 /////////////// 2730 2660 case READ_IDLE: // waiting a read request 2731 2732 if(m_cmd_read_addr_fifo.rok())2733 2661 { 2662 if (m_cmd_read_addr_fifo.rok()) 2663 { 2734 2664 2735 2665 #if DEBUG_MEMC_READ 2736 if(m_debug) 2737 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2738 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2739 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2740 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2741 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2742 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2743 #endif 2744 r_read_coherent = false; //WB by default 2745 r_read_ll_done = false; 2746 r_read_fsm = READ_DIR_REQ; 2747 } 2748 break; 2749 } 2750 2751 ////////////////// 2666 if 
(m_debug) 2667 { 2668 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 2669 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 2670 << " / srcid = " << m_cmd_read_srcid_fifo.read() 2671 << " / trdid = " << m_cmd_read_trdid_fifo.read() 2672 << " / pktid = " << m_cmd_read_pktid_fifo.read() 2673 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2674 } 2675 #endif 2676 r_read_coherent = false; // WB by default 2677 r_read_ll_done = false; 2678 r_read_fsm = READ_DIR_REQ; 2679 } 2680 break; 2681 } 2682 ////////////////// 2752 2683 case READ_DIR_REQ: // Get the lock to the directory 2753 2754 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ)2755 2756 2757 2684 { 2685 if (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 2686 { 2687 r_read_fsm = READ_DIR_LOCK; 2688 } 2758 2689 2759 2690 #if DEBUG_MEMC_READ 2760 if(m_debug) 2761 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 2762 #endif 2763 break; 2764 } 2765 2766 /////////////////// 2691 if (m_debug) 2692 { 2693 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 2694 } 2695 #endif 2696 break; 2697 } 2698 2699 /////////////////// 2767 2700 case READ_DIR_LOCK: // check directory for hit / miss 2768 { 2769 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2770 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2771 2772 size_t way = 0; 2773 DirectoryEntry entry = 2774 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2775 if(((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) and not r_read_ll_done.read()) // access the global table ONLY when we have an LL cmd 2776 { 2777 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2778 r_read_ll_done = true; 2779 } 2780 r_read_is_cnt = entry.is_cnt; 2781 r_read_dirty = entry.dirty; 2782 r_read_lock = entry.lock; 2783 r_read_tag = entry.tag; 2784 r_read_way = way; 2785 r_read_count = entry.count; 2786 r_read_copy = entry.owner.srcid; 2787 2788 r_read_copy_inst = entry.owner.inst; 2789 r_read_ptr = entry.ptr; // pointer to the heap 2790 2791 // check if this is a cached read, this means pktid is either 2792 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2793 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2794 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2795 2796 if(entry.valid) // hit 2797 { 2798 r_read_coherent = entry.cache_coherent; 2799 2800 // hit on a WT line or the owner has no more copy (if LL, the owner must be invalidated even if he made the request) 2801 if (entry.cache_coherent or (entry.count == 0))// or (entry.owner.srcid == m_cmd_read_srcid_fifo.read())) 2701 { 2702 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2703 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2704 2705 size_t way = 0; 2706 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2707 2708 // access the global table ONLY when we have an LL cmd 2709 if (((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) and 2710 not r_read_ll_done.read()) 2711 { 2712 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 2713 r_read_ll_done = true; 2714 } 2715 r_read_is_cnt = entry.is_cnt; 2716 r_read_dirty = entry.dirty; 2717 r_read_lock = entry.lock; 2718 r_read_tag = entry.tag; 2719 r_read_way = way; 2720 r_read_count = entry.count; 2721 r_read_copy = entry.owner.srcid; 2722 r_read_copy_inst = entry.owner.inst; 2723 r_read_ptr = entry.ptr; // pointer to the heap 2724 2725 // check if this is a cached read, this means pktid is either 
2726 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2727 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2728 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2729 if (entry.valid) // hit 2730 { 2731 r_read_coherent = entry.cache_coherent; 2732 2733 // hit on a WT line or the owner has no more copy (if LL, the owner must be invalidated even if he made the request) 2734 if (entry.cache_coherent or (entry.count == 0)) // or (entry.owner.srcid == m_cmd_read_srcid_fifo.read())) 2735 { 2736 // test if we need to register a new copy in the heap 2737 if (entry.is_cnt or (entry.count == 0) or !cached_read) 2802 2738 { 2803 // test if we need to register a new copy in the heap 2804 if(entry.is_cnt || (entry.count == 0) || !cached_read) 2805 { 2806 r_read_fsm = READ_DIR_HIT; 2807 } 2808 else 2809 { 2810 r_read_fsm = READ_HEAP_REQ; 2811 } 2812 } 2813 else //hit on a WB line owned by an other proc 2814 { 2815 r_read_fsm = READ_IVT_LOCK; 2816 } 2817 } 2818 else // miss 2819 { 2820 r_read_fsm = READ_TRT_LOCK; 2821 } 2822 2823 #if DEBUG_MEMC_READ 2824 if(m_debug) 2825 { 2826 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2827 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2828 << " / hit = " << std::dec << entry.valid 2829 << " / count = " <<std::dec << entry.count 2830 << " / is_cnt = " << entry.is_cnt 2831 << " / is_coherent = " << entry.cache_coherent; 2832 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2833 else std::cout << std::endl; 2834 } 2835 #endif 2836 break; 2837 } 2838 2839 /////////////////// 2840 case READ_IVT_LOCK: 2841 { 2842 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_READ) 2843 { 2844 size_t index; 2845 addr_t nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2846 if(m_ivt.search_inval(nline, index) or m_ivt.is_full() or r_read_to_cc_send_req.read() or r_read_to_cleanup_req.read()) //Check pending inval 2847 { 2848 r_read_fsm = READ_WAIT; 2849 #if DEBUG_MEMC_READ 2850 if(m_debug) 2851 { 2852 std::cout 2853 << " <MEMC " << name() << " READ_IVT_LOCK>" 2854 << " Wait cleanup completion" 2855 << std::endl; 2856 } 2857 #endif 2739 r_read_fsm = READ_DIR_HIT; 2858 2740 } 2859 2741 else 2860 2742 { 2861 r_read_to_cc_send_req = true; 2862 r_read_to_cc_send_dest = r_read_copy.read(); 2863 r_read_to_cc_send_nline = nline; 2864 r_read_to_cc_send_inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2865 r_read_to_cleanup_req = true; 2866 r_read_to_cleanup_nline = nline; 2867 r_read_to_cleanup_srcid = m_cmd_read_srcid_fifo.read(); 2868 r_read_to_cleanup_inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2869 r_read_to_cleanup_length = m_cmd_read_length_fifo.read(); 2870 r_read_to_cleanup_first_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2871 r_read_to_cleanup_cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2872 r_read_to_cleanup_addr = m_cmd_read_addr_fifo.read(); 2873 r_read_to_cleanup_is_ll= ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL); 2874 r_read_to_cleanup_ll_key = r_read_ll_key.read(); 2875 2876 m_ivt.set(false, // it's an inval transaction 2877 false, // it's not a broadcast 2878 false, // it needs a read response 2879 false, // no acknowledge required 2880 m_cmd_read_srcid_fifo.read(), 2881 m_cmd_read_trdid_fifo.read(), 2882 m_cmd_read_pktid_fifo.read(), 2883 nline, 2884 0x1, //Expect only one answer 2885 index); 2886 2887 cmd_read_fifo_get = true; 2888 r_read_fsm = READ_IDLE; 2743 r_read_fsm = READ_HEAP_REQ; 2744 } 2745 } 2746 else // hit on a WB line owned by an other proc 
2747 { 2748 r_read_fsm = READ_IVT_LOCK; 2749 } 2750 } 2751 else // miss 2752 { 2753 r_read_fsm = READ_TRT_LOCK; 2754 } 2755 2889 2756 #if DEBUG_MEMC_READ 2890 if(m_debug) 2891 { 2892 std::cout 2893 << " <MEMC " << name() << " READ_IVT_LOCK>" 2894 << std::hex 2895 << " Inval req on an NCC line" 2896 << " | owner = " << r_read_copy.read() 2897 << " | nline = " << nline 2898 << std::dec 2899 << std::endl; 2900 } 2901 #endif 2757 if (m_debug) 2758 { 2759 std::cout << " <MEMC " << name() << " READ_DIR_LOCK> Accessing directory: " 2760 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2761 << " / hit = " << std::dec << entry.valid 2762 << " / count = " <<std::dec << entry.count 2763 << " / is_cnt = " << entry.is_cnt 2764 << " / is_coherent = " << entry.cache_coherent; 2765 if ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) std::cout << " / LL access" << std::endl; 2766 else std::cout << std::endl; 2767 } 2768 #endif 2769 break; 2770 } 2771 /////////////////// 2772 case READ_IVT_LOCK: 2773 { 2774 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_READ) 2775 { 2776 size_t index; 2777 addr_t nline = m_nline[(addr_t) (m_cmd_read_addr_fifo.read())]; 2778 //Check pending inval 2779 if (m_ivt.search_inval(nline, index) or 2780 m_ivt.is_full() or 2781 r_read_to_cc_send_req.read() or 2782 r_read_to_cleanup_req.read()) 2783 { 2784 r_read_fsm = READ_WAIT; 2785 #if DEBUG_MEMC_READ 2786 if (m_debug) 2787 { 2788 std::cout 2789 << " <MEMC " << name() << " READ_IVT_LOCK>" 2790 << " Wait cleanup completion" 2791 << std::endl; 2902 2792 } 2903 } 2904 2905 2906 break; 2907 } 2908 2909 ////////////////// 2910 case READ_WAIT://Release the locks 2911 { 2912 r_read_fsm = READ_DIR_REQ; 2793 #endif 2794 } 2795 else 2796 { 2797 r_read_to_cc_send_req = true; 2798 r_read_to_cc_send_dest = r_read_copy.read(); 2799 r_read_to_cc_send_nline = nline; 2800 r_read_to_cc_send_inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2801 2802 r_read_to_cleanup_req = true; 2803 r_read_to_cleanup_nline = nline; 2804 r_read_to_cleanup_srcid = m_cmd_read_srcid_fifo.read(); 2805 r_read_to_cleanup_inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2806 r_read_to_cleanup_length = m_cmd_read_length_fifo.read(); 2807 r_read_to_cleanup_first_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2808 r_read_to_cleanup_cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2809 r_read_to_cleanup_addr = m_cmd_read_addr_fifo.read(); 2810 r_read_to_cleanup_is_ll = ((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL); 2811 r_read_to_cleanup_ll_key = r_read_ll_key.read(); 2812 2813 m_ivt.set(false, // it's an inval transaction 2814 false, // it's not a broadcast 2815 false, // it needs a read response 2816 false, // no acknowledge required 2817 m_cmd_read_srcid_fifo.read(), 2818 m_cmd_read_trdid_fifo.read(), 2819 m_cmd_read_pktid_fifo.read(), 2820 nline, 2821 0x1, //Expect only one answer 2822 index); 2823 2824 cmd_read_fifo_get = true; 2825 r_read_fsm = READ_IDLE; 2913 2826 #if DEBUG_MEMC_READ 2914 if(m_debug) 2915 { 2916 std::cout 2917 << " <MEMC " << name() << " READ_WAIT>" << std::endl; 2918 } 2919 #endif 2920 break; 2921 } 2922 /////////////////// 2827 if (m_debug) 2828 { 2829 std::cout 2830 << " <MEMC " << name() << " READ_IVT_LOCK>" 2831 << std::hex 2832 << " Inval req on an NCC line" 2833 << " | owner = " << r_read_copy.read() 2834 << " | nline = " << nline 2835 << std::dec 2836 << std::endl; 2837 } 2838 #endif 2839 } 2840 } 2841 break; 2842 } 2843 ////////////////// 2844 case READ_WAIT: //Release the locks 2845 { 2846 r_read_fsm = READ_DIR_REQ; 2847 
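// --- Editor's sketch, not part of the changeset -----------------------------
// pktid decoding used by READ_DIR_LOCK / READ_DIR_HIT (above and below), per
// the TSAR encoding recalled in the comments. The helper name is hypothetical;
// TYPE_LL is the constant already used by this FSM.
static void decode_read_pktid(uint32_t pktid, bool &cached, bool &inst, bool &is_ll)
{
    cached = (pktid & 0x1) != 0;        // TYPE_READ_DATA_MISS 0bX001 / TYPE_READ_INS_MISS 0bX011
    inst   = (pktid & 0x2) != 0;        // instruction access
    is_ll  = (pktid & 0x7) == TYPE_LL;  // linked-load: must access the LL/SC table
}
// -----------------------------------------------------------------------------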
#if DEBUG_MEMC_READ 2848 if (m_debug) 2849 { 2850 std::cout 2851 << " <MEMC " << name() << " READ_WAIT>" << std::endl; 2852 } 2853 #endif 2854 break; 2855 } 2856 ////////////////// 2923 2857 case READ_DIR_HIT: // read data in cache & update the directory 2924 // we enter this state in 3 cases: 2925 // - the read request is uncachable 2926 // - the cache line is in counter mode 2927 // - the cache line is valid but not replicated 2928 2929 { 2930 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2931 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2932 // check if this is an instruction read, this means pktid is either 2933 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2934 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2935 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2936 // check if this is a cached read, this means pktid is either 2937 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2938 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2939 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2940 bool is_cnt = r_read_is_cnt.read(); 2941 2858 // we enter this state in 3 cases: 2859 // - the read request is uncachable 2860 // - the cache line is in counter mode 2861 // - the cache line is valid but not replicated 2862 { 2863 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2864 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2865 2866 // check if this is an instruction read, this means pktid is either 2867 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 2868 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2869 bool inst_read = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2870 // check if this is a cached read, this means pktid is either 2871 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 2872 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 2873 bool cached_read = (m_cmd_read_pktid_fifo.read() & 0x1); 2874 bool is_cnt = r_read_is_cnt.read(); 2875 2876 // read data in the cache 2877 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 2878 size_t way = r_read_way.read(); 2879 2880 m_cache_data.read_line(way, set, r_read_data); 2881 2882 // update the cache directory 2883 DirectoryEntry entry; 2884 entry.valid = true; 2885 2886 entry.cache_coherent = r_read_coherent.read() or inst_read or !cached_read; 2887 r_read_coherent = r_read_coherent.read() or inst_read or !cached_read; 2888 2889 entry.is_cnt = is_cnt; 2890 entry.dirty = r_read_dirty.read(); 2891 entry.tag = r_read_tag.read(); 2892 entry.lock = r_read_lock.read(); 2893 entry.ptr = r_read_ptr.read(); 2894 2895 if (cached_read) // Cached read => we must update the copies 2896 { 2897 if (!is_cnt) // Not counter mode 2898 { 2899 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2900 entry.owner.inst = inst_read; 2901 entry.count = r_read_count.read() + 1; 2902 } 2903 else // Counter mode 2904 { 2905 entry.owner.srcid = 0; 2906 entry.owner.inst = false; 2907 entry.count = r_read_count.read() + 1; 2908 } 2909 } 2910 else // Uncached read 2911 { 2912 entry.owner.srcid = r_read_copy.read(); 2913 entry.owner.inst = r_read_copy_inst.read(); 2914 entry.count = r_read_count.read(); 2915 } 2916 2917 #if DEBUG_MEMC_READ 2918 if (m_debug) 2919 { 2920 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2921 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2922 << " / set = " << std::dec << set 2923 << " / way = " << way 2924 << " / owner_id = " << std::hex << entry.owner.srcid 2925 << " / owner_ins = " << std::dec << entry.owner.inst 2926 << " / coherent = " << 
entry.cache_coherent 2927 << " / count = " << entry.count 2928 << " / is_cnt = " << entry.is_cnt << std::endl; 2929 } 2930 #endif 2931 m_cache_directory.write(set, way, entry); 2932 r_read_fsm = READ_RSP; 2933 break; 2934 } 2935 /////////////////// 2936 case READ_HEAP_REQ: // Get the lock to the HEAP directory 2937 { 2938 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2939 { 2940 r_read_fsm = READ_HEAP_LOCK; 2941 } 2942 2943 #if DEBUG_MEMC_READ 2944 if (m_debug) 2945 { 2946 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 2947 << " Requesting HEAP lock " << std::endl; 2948 } 2949 #endif 2950 break; 2951 } 2952 2953 //////////////////// 2954 case READ_HEAP_LOCK: // read data in cache, update the directory 2955 // and prepare the HEAP update 2956 { 2957 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 2958 { 2959 // enter counter mode when we reach the limit of copies or the heap is full 2960 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 2961 2962 assert(r_read_coherent.read() && "Heap access on line NCC"); 2942 2963 // read data in the cache 2943 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())];2944 size_t way 2964 size_t set = m_y[(addr_t) (m_cmd_read_addr_fifo.read())]; 2965 size_t way = r_read_way.read(); 2945 2966 2946 2967 m_cache_data.read_line(way, set, r_read_data); … … 2948 2969 // update the cache directory 2949 2970 DirectoryEntry entry; 2950 entry.valid = true; 2951 //entry.cache_coherent = r_read_coherent.read() or inst_read or (!(cached_read)) or (r_read_copy.read() != m_cmd_read_srcid_fifo.read()); 2952 //r_read_coherent = r_read_coherent.read() or inst_read or (!(cached_read)) or (r_read_copy.read() != m_cmd_read_srcid_fifo.read()); 2953 2954 entry.cache_coherent = r_read_coherent.read() or inst_read or (!(cached_read)); 2955 r_read_coherent = r_read_coherent.read() or inst_read or (!(cached_read)); 2956 2957 entry.is_cnt = is_cnt; 2958 entry.dirty = r_read_dirty.read(); 2959 entry.tag = r_read_tag.read(); 2960 entry.lock = r_read_lock.read(); 2961 entry.ptr = r_read_ptr.read(); 2962 if(cached_read) // Cached read => we must update the copies 2963 { 2964 if(!is_cnt) // Not counter mode 2971 entry.valid = true; 2972 entry.is_cnt = go_cnt; 2973 entry.dirty = r_read_dirty.read(); 2974 entry.tag = r_read_tag.read(); 2975 entry.lock = r_read_lock.read(); 2976 entry.count = r_read_count.read() + 1; 2977 entry.cache_coherent = r_read_coherent.read(); 2978 2979 if (not go_cnt) // Not entering counter mode 2980 { 2981 entry.owner.srcid = r_read_copy.read(); 2982 entry.owner.inst = r_read_copy_inst.read(); 2983 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 2984 } 2985 else // Entering Counter mode 2986 { 2987 entry.owner.srcid = 0; 2988 entry.owner.inst = false; 2989 entry.ptr = 0; 2990 } 2991 2992 m_cache_directory.write(set, way, entry); 2993 2994 // prepare the heap update (add an entry, or clear the linked list) 2995 if (not go_cnt) // not switching to counter mode 2996 { 2997 // We test if the next free entry in the heap is the last 2998 HeapEntry heap_entry = m_heap.next_free_entry(); 2999 r_read_next_ptr = heap_entry.next; 3000 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 3001 3002 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 3003 } 3004 else // switching to counter mode 3005 { 3006 if (r_read_count.read() > 1) // heap must be cleared 2965 3007 { 2966 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2967 entry.owner.inst = inst_read; 2968 entry.count = r_read_count.read() + 1; 2969 } 2970 
else // Counter mode 2971 { 2972 entry.owner.srcid = 0; 2973 entry.owner.inst = false; 2974 entry.count = r_read_count.read() + 1; 2975 } 2976 } 2977 else // Uncached read 2978 { 2979 entry.owner.srcid = r_read_copy.read(); 2980 entry.owner.inst = r_read_copy_inst.read(); 2981 entry.count = r_read_count.read(); 2982 } 2983 2984 #if DEBUG_MEMC_READ 2985 if(m_debug) 2986 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2987 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2988 << " / set = " << std::dec << set 2989 << " / way = " << way 2990 << " / owner_id = " << std::hex << entry.owner.srcid 2991 << " / owner_ins = " << std::dec << entry.owner.inst 2992 << " / coherent = " << entry.cache_coherent 2993 << " / count = " << entry.count 2994 << " / is_cnt = " << entry.is_cnt << std::endl; 2995 #endif 2996 2997 m_cache_directory.write(set, way, entry); 2998 r_read_fsm = READ_RSP; 2999 break; 3000 } 3001 /////////////////// 3002 case READ_HEAP_REQ: // Get the lock to the HEAP directory 3003 { 3004 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3005 { 3006 r_read_fsm = READ_HEAP_LOCK; 3007 } 3008 3009 #if DEBUG_MEMC_READ 3010 if(m_debug) 3011 std::cout << " <MEMC " << name() << " READ_HEAP_REQ>" 3012 << " Requesting HEAP lock " << std::endl; 3013 #endif 3014 break; 3015 } 3016 3017 //////////////////// 3018 case READ_HEAP_LOCK: // read data in cache, update the directory 3019 // and prepare the HEAP update 3020 { 3021 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3022 { 3023 // enter counter mode when we reach the limit of copies or the heap is full 3024 bool go_cnt = (r_read_count.read() >= m_max_copies) or m_heap.is_full(); 3025 3026 assert (r_read_coherent.read() && "Heap access on line NCC"); 3027 // read data in the cache 3028 size_t set = m_y[(addr_t)(m_cmd_read_addr_fifo.read())]; 3029 size_t way = r_read_way.read(); 3030 3031 m_cache_data.read_line(way, set, r_read_data); 3032 3033 // update the cache directory 3034 DirectoryEntry entry; 3035 entry.valid = true; 3036 entry.cache_coherent = r_read_coherent.read(); 3037 entry.is_cnt = go_cnt; 3038 entry.dirty = r_read_dirty.read(); 3039 entry.tag = r_read_tag.read(); 3040 entry.lock = r_read_lock.read(); 3041 entry.count = r_read_count.read() + 1; 3042 3043 if(not go_cnt) // Not entering counter mode 3044 { 3045 entry.owner.srcid = r_read_copy.read(); 3046 entry.owner.inst = r_read_copy_inst.read(); 3047 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap 3048 } 3049 else // Entering Counter mode 3050 { 3051 entry.owner.srcid = 0; 3052 entry.owner.inst = false; 3053 entry.ptr = 0; 3054 } 3055 3056 m_cache_directory.write(set, way, entry); 3057 3058 // prepare the heap update (add an entry, or clear the linked list) 3059 if(not go_cnt) // not switching to counter mode 3060 { 3061 // We test if the next free entry in the heap is the last 3062 HeapEntry heap_entry = m_heap.next_free_entry(); 3063 r_read_next_ptr = heap_entry.next; 3064 r_read_last_free = (heap_entry.next == m_heap.next_free_ptr()); 3065 3066 r_read_fsm = READ_HEAP_WRITE; // add an entry in the HEAP 3067 } 3068 else // switching to counter mode 3069 { 3070 if(r_read_count.read() > 1) // heap must be cleared 3008 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 3009 r_read_next_ptr = m_heap.next_free_ptr(); 3010 m_heap.write_free_ptr(r_read_ptr.read()); 3011 3012 if (next_entry.next == r_read_ptr.read()) // last entry 3071 3013 { 3072 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 3073 r_read_next_ptr = 
m_heap.next_free_ptr(); 3074 m_heap.write_free_ptr(r_read_ptr.read()); 3075 3076 if(next_entry.next == r_read_ptr.read()) // last entry 3077 { 3078 r_read_fsm = READ_HEAP_LAST; // erase the entry 3079 } 3080 else // not the last entry 3081 { 3082 r_read_ptr = next_entry.next; 3083 r_read_fsm = READ_HEAP_ERASE; // erase the list 3084 } 3014 r_read_fsm = READ_HEAP_LAST; // erase the entry 3085 3015 } 3086 else // the heap is not used / nothing to do3016 else // not the last entry 3087 3017 { 3088 r_read_fsm = READ_RSP; 3018 r_read_ptr = next_entry.next; 3019 r_read_fsm = READ_HEAP_ERASE; // erase the list 3089 3020 } 3090 3021 } 3022 else // the heap is not used / nothing to do 3023 { 3024 r_read_fsm = READ_RSP; 3025 } 3026 } 3091 3027 3092 3028 #if DEBUG_MEMC_READ 3093 if(m_debug) 3094 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 3095 << " tag = " << std::hex << entry.tag 3096 << " set = " << std::dec << set 3097 << " way = " << way 3098 << " count = " << entry.count 3099 << " is_cnt = " << entry.is_cnt << std::endl; 3100 #endif 3029 if (m_debug) 3030 { 3031 std::cout << " <MEMC " << name() << " READ_HEAP_LOCK> Update directory:" 3032 << " tag = " << std::hex << entry.tag 3033 << " set = " << std::dec << set 3034 << " way = " << way 3035 << " count = " << entry.count 3036 << " is_cnt = " << entry.is_cnt << std::endl; 3037 } 3038 #endif 3039 } 3040 else 3041 { 3042 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK" 3043 << "Bad HEAP allocation" << std::endl; 3044 exit(0); 3045 } 3046 break; 3047 } 3048 ///////////////////// 3049 case READ_HEAP_WRITE: // add an entry in the heap 3050 { 3051 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3052 { 3053 HeapEntry heap_entry; 3054 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 3055 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 3056 3057 if (r_read_count.read() == 1) // creation of a new linked list 3058 { 3059 heap_entry.next = m_heap.next_free_ptr(); 3060 } 3061 else // head insertion in existing list 3062 { 3063 heap_entry.next = r_read_ptr.read(); 3064 } 3065 m_heap.write_free_entry(heap_entry); 3066 m_heap.write_free_ptr(r_read_next_ptr.read()); 3067 if (r_read_last_free.read()) { 3068 m_heap.set_full(); 3069 } 3070 3071 // <Activity counters> 3072 m_cpt_heap_slot_available--; 3073 // </Activity counters> 3074 3075 r_read_fsm = READ_RSP; 3076 3077 #if DEBUG_MEMC_READ 3078 if (m_debug) 3079 { 3080 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 3081 << " owner_id = " << std::hex << heap_entry.owner.srcid 3082 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 3083 } 3084 #endif 3085 } 3086 else 3087 { 3088 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 3089 << "Bad HEAP allocation" << std::endl; 3090 exit(0); 3091 } 3092 break; 3093 } 3094 ///////////////////// 3095 case READ_HEAP_ERASE: 3096 { 3097 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3098 { 3099 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 3100 if (next_entry.next == r_read_ptr.read()) 3101 { 3102 r_read_fsm = READ_HEAP_LAST; 3101 3103 } 3102 3104 else 3103 3105 { 3104 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LOCK" 3105 << "Bad HEAP allocation" << std::endl; 3106 exit(0); 3107 } 3108 break; 3109 } 3110 ///////////////////// 3111 case READ_HEAP_WRITE: // add an entry in the heap 3112 { 3113 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3114 { 3115 HeapEntry heap_entry; 3116 heap_entry.owner.srcid = 
m_cmd_read_srcid_fifo.read(); 3117 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 3118 3119 if(r_read_count.read() == 1) // creation of a new linked list 3120 { 3121 heap_entry.next = m_heap.next_free_ptr(); 3122 } 3123 else // head insertion in existing list 3124 { 3125 heap_entry.next = r_read_ptr.read(); 3126 } 3127 m_heap.write_free_entry(heap_entry); 3128 m_heap.write_free_ptr(r_read_next_ptr.read()); 3129 3130 if(r_read_last_free.read()) { 3131 m_heap.set_full(); 3132 } 3133 3134 // <Activity counters> 3135 m_cpt_heap_slot_available--; 3136 // </Activity counters> 3137 3138 r_read_fsm = READ_RSP; 3106 r_read_ptr = next_entry.next; 3107 r_read_fsm = READ_HEAP_ERASE; 3108 } 3109 } 3110 else 3111 { 3112 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 3113 << "Bad HEAP allocation" << std::endl; 3114 exit(0); 3115 } 3116 break; 3117 } 3118 3119 //////////////////// 3120 case READ_HEAP_LAST: 3121 { 3122 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3123 { 3124 HeapEntry last_entry; 3125 last_entry.owner.srcid = 0; 3126 last_entry.owner.inst = false; 3127 3128 if (m_heap.is_full()) 3129 { 3130 last_entry.next = r_read_ptr.read(); 3131 m_heap.unset_full(); 3132 } 3133 else 3134 { 3135 last_entry.next = r_read_next_ptr.read(); 3136 } 3137 m_heap.write(r_read_ptr.read(),last_entry); 3138 r_read_fsm = READ_RSP; 3139 3140 // <Activity counters> 3141 m_cpt_heap_slot_available = m_cpt_heap_slot_available + (r_read_count.read() - 1); 3142 // </Activity counters> 3143 } 3144 else 3145 { 3146 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 3147 << "Bad HEAP allocation" << std::endl; 3148 exit(0); 3149 } 3150 break; 3151 } 3152 ////////////// 3153 case READ_RSP: // request the TGT_RSP FSM to return data 3154 { 3155 if (!r_read_to_tgt_rsp_req) 3156 { 3157 for (size_t i = 0; i < m_words; i++) 3158 { 3159 r_read_to_tgt_rsp_data[i] = r_read_data[i]; 3160 } 3161 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 3162 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 3163 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 3164 3165 if (r_read_coherent.read()) 3166 { 3167 r_read_to_tgt_rsp_pktid = 0x0 + m_cmd_read_pktid_fifo.read(); 3168 } 3169 else 3170 { 3171 r_read_to_tgt_rsp_pktid = 0x8 + m_cmd_read_pktid_fifo.read(); 3172 } 3173 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 3174 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 3175 cmd_read_fifo_get = true; 3176 r_read_to_tgt_rsp_req = true; 3177 r_read_fsm = READ_IDLE; 3139 3178 3140 3179 #if DEBUG_MEMC_READ 3141 if(m_debug) 3142 std::cout << " <MEMC " << name() << " READ_HEAP_WRITE> Add an entry in the heap:" 3143 << " owner_id = " << std::hex << heap_entry.owner.srcid 3144 << " owner_ins = " << std::dec << heap_entry.owner.inst << std::endl; 3145 #endif 3146 } 3147 else 3148 { 3149 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_WRITE" 3150 << "Bad HEAP allocation" << std::endl; 3151 exit(0); 3152 } 3153 break; 3154 } 3155 ///////////////////// 3156 case READ_HEAP_ERASE: 3157 { 3158 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3159 { 3160 HeapEntry next_entry = m_heap.read(r_read_ptr.read()); 3161 if(next_entry.next == r_read_ptr.read()) 3162 { 3163 r_read_fsm = READ_HEAP_LAST; 3164 } 3165 else 3166 { 3167 r_read_ptr = next_entry.next; 3168 r_read_fsm = READ_HEAP_ERASE; 3169 } 3170 } 3171 else 3172 { 3173 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_ERASE" 3174 << "Bad HEAP allocation" << std::endl; 3175 exit(0); 3176 
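// --- Editor's sketch, not part of the changeset -----------------------------
// Convention of the heap of copies manipulated in the READ_HEAP_* and
// CONFIG_HEAP_* states: each HeapEntry holds an owner (srcid, inst) and a next
// index, and the tail of a sharing list is the entry whose next points to
// itself. Hypothetical traversal: heap_read() stands for m_heap.read(),
// visit() for the per-copy action (e.g. posting an inval to CC_SEND), and
// head for the pointer stored in the DIR entry.
size_t ptr = head;
for (;;)
{
    HeapEntry e = heap_read(ptr);
    visit(e.owner.srcid, e.owner.inst);
    if (e.next == ptr) break;   // last copy: entry points to itself
    ptr = e.next;
}
// -----------------------------------------------------------------------------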
} 3177 break; 3178 } 3179 3180 //////////////////// 3181 case READ_HEAP_LAST: 3182 { 3183 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_READ) 3184 { 3185 HeapEntry last_entry; 3186 last_entry.owner.srcid = 0; 3187 last_entry.owner.inst = false; 3188 3189 if(m_heap.is_full()) 3190 { 3191 last_entry.next = r_read_ptr.read(); 3192 m_heap.unset_full(); 3193 } 3194 else 3195 { 3196 last_entry.next = r_read_next_ptr.read(); 3197 } 3198 m_heap.write(r_read_ptr.read(),last_entry); 3199 r_read_fsm = READ_RSP; 3200 3201 // <Activity counters> 3202 m_cpt_heap_slot_available = m_cpt_heap_slot_available + (r_read_count.read() - 1); 3203 // </Activity counters> 3204 } 3205 else 3206 { 3207 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_HEAP_LAST" 3208 << "Bad HEAP allocation" << std::endl; 3209 exit(0); 3210 } 3211 break; 3212 } 3213 ////////////// 3214 case READ_RSP: // request the TGT_RSP FSM to return data 3215 { 3216 if(!r_read_to_tgt_rsp_req) 3217 { 3218 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 3219 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 3220 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 3221 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 3222 3223 if (r_read_coherent.read()) 3224 { 3225 r_read_to_tgt_rsp_pktid = 0x0 + m_cmd_read_pktid_fifo.read(); 3226 } 3227 else 3228 { 3229 r_read_to_tgt_rsp_pktid = 0x8 + m_cmd_read_pktid_fifo.read(); 3230 } 3231 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 3232 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 3233 cmd_read_fifo_get = true; 3234 r_read_to_tgt_rsp_req = true; 3235 r_read_fsm = READ_IDLE; 3180 if (m_debug) 3181 { 3182 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 3183 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 3184 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 3185 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 3186 } 3187 #endif 3188 } 3189 break; 3190 } 3191 /////////////////// 3192 case READ_TRT_LOCK: // read miss : check the Transaction Table 3193 { 3194 if (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 3195 { 3196 size_t index = 0; 3197 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 3198 bool hit_read = m_trt.hit_read(m_nline[addr], index); 3199 bool hit_write = m_trt.hit_write(m_nline[addr]); 3200 bool wok = not m_trt.full(index); 3201 3202 if (hit_read or !wok or hit_write) // line already requested or no space 3203 { 3204 if (!wok) m_cpt_trt_full++; 3205 if (hit_read or hit_write) m_cpt_trt_rb++; 3206 r_read_fsm = READ_IDLE; 3207 } 3208 else // missing line is requested to the XRAM 3209 { 3210 m_cpt_read_miss++; 3211 r_read_trt_index = index; 3212 r_read_fsm = READ_TRT_SET; 3213 } 3236 3214 3237 3215 #if DEBUG_MEMC_READ 3238 if(m_debug) 3239 std::cout << " <MEMC " << name() << " READ_RSP> Request TGT_RSP FSM to return data:" 3240 << " rsrcid = " << std::hex << m_cmd_read_srcid_fifo.read() 3241 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 3242 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 3243 #endif 3244 } 3245 break; 3246 } 3247 /////////////////// 3248 case READ_TRT_LOCK: // read miss : check the Transaction Table 3249 { 3250 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 3251 { 3252 size_t index = 0; 3253 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 3254 bool hit_read = m_trt.hit_read(m_nline[addr], index); 3255 bool hit_write = m_trt.hit_write(m_nline[addr]); 3256 bool wok = 
!m_trt.full(index); 3257 3258 if(hit_read or !wok or hit_write) // missing line already requested or no space 3259 { 3260 if(!wok) 3261 { 3262 m_cpt_trt_full++; 3263 } 3264 if(hit_read or hit_write) m_cpt_trt_rb++; 3265 r_read_fsm = READ_IDLE; 3266 } 3267 else // missing line is requested to the XRAM 3268 { 3269 m_cpt_read_miss++; 3270 r_read_trt_index = index; 3271 r_read_fsm = READ_TRT_SET; 3272 } 3273 3216 if (m_debug) 3217 { 3218 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 3219 << " hit_read = " << hit_read 3220 << " / hit_write = " << hit_write 3221 << " / full = " << !wok << std::endl; 3222 } 3223 #endif 3224 } 3225 break; 3226 } 3227 ////////////////// 3228 case READ_TRT_SET: // register get transaction in TRT 3229 { 3230 if (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 3231 { 3232 m_trt.set(r_read_trt_index.read(), 3233 true, // GET 3234 m_nline[(addr_t) (m_cmd_read_addr_fifo.read())], 3235 m_cmd_read_srcid_fifo.read(), 3236 m_cmd_read_trdid_fifo.read(), 3237 m_cmd_read_pktid_fifo.read(), 3238 true, // proc read 3239 m_cmd_read_length_fifo.read(), 3240 m_x[(addr_t) (m_cmd_read_addr_fifo.read())], 3241 std::vector<be_t> (m_words, 0), 3242 std::vector<data_t> (m_words, 0), 3243 r_read_ll_key.read()); 3274 3244 #if DEBUG_MEMC_READ 3275 if(m_debug) 3276 std::cout << " <MEMC " << name() << " READ_TRT_LOCK> Check TRT:" 3277 << " hit_read = " << hit_read 3278 << " / hit_write = " << hit_write 3279 << " / full = " << !wok << std::endl; 3280 #endif 3281 } 3282 break; 3283 } 3284 3285 ////////////////// 3286 case READ_TRT_SET: // register get transaction in TRT 3287 { 3288 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 3289 { 3290 m_trt.set(r_read_trt_index.read(), 3291 true, 3292 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 3293 m_cmd_read_srcid_fifo.read(), 3294 m_cmd_read_trdid_fifo.read(), 3295 m_cmd_read_pktid_fifo.read(), 3296 true, 3297 m_cmd_read_length_fifo.read(), 3298 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 3299 std::vector<be_t> (m_words,0), 3300 std::vector<data_t> (m_words,0), 3301 r_read_ll_key.read()); 3245 if (m_debug) 3246 { 3247 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 3248 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 3249 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 3250 } 3251 #endif 3252 r_read_fsm = READ_TRT_REQ; 3253 } 3254 break; 3255 } 3256 3257 ////////////////// 3258 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 3259 { 3260 if (not r_read_to_ixr_cmd_req) 3261 { 3262 cmd_read_fifo_get = true; 3263 r_read_to_ixr_cmd_req = true; 3264 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 3265 r_read_fsm = READ_IDLE; 3302 3266 3303 3267 #if DEBUG_MEMC_READ 3304 if(m_debug) 3305 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TGT:" 3306 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 3307 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 3308 #endif 3309 r_read_fsm = READ_TRT_REQ; 3310 } 3311 break; 3312 } 3313 3314 ////////////////// 3315 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 3316 { 3317 if(not r_read_to_ixr_cmd_req) 3318 { 3319 cmd_read_fifo_get = true; 3320 r_read_to_ixr_cmd_req = true; 3321 //r_read_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 3322 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 3323 r_read_fsm = READ_IDLE; 3324 3325 #if DEBUG_MEMC_READ 3326 if(m_debug) 3327 std::cout << " <MEMC " << name() << " 
READ_TRT_REQ> Request GET transaction for address " 3328 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 3329 #endif 3330 } 3331 break; 3332 } 3268 if (m_debug) 3269 { 3270 std::cout << " <MEMC " << name() << " READ_TRT_REQ> Request GET transaction for address " 3271 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 3272 } 3273 #endif 3274 } 3275 break; 3276 } 3333 3277 } // end switch read_fsm 3334 3278 … … 3366 3310 ///////////////////////////////////////////////////////////////////////////////////// 3367 3311 3368 switch (r_write_fsm.read())3312 switch (r_write_fsm.read()) 3369 3313 { 3370 3314 //////////////// 3371 3315 case WRITE_IDLE: // copy first word of a write burst in local buffer 3372 { 3373 if (not m_cmd_write_addr_fifo.rok()) break; 3374 // consume a word in the FIFO & write it in the local buffer 3375 cmd_write_fifo_get = true; 3376 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3377 3378 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 3379 r_write_word_index = index; 3380 r_write_word_count = 0; 3381 r_write_data[index] = m_cmd_write_data_fifo.read(); 3382 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3383 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3384 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3385 3386 // if SC command, get the SC key 3387 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3388 { 3389 assert( not m_cmd_write_eop_fifo.read() && 3390 "MEMC ERROR in WRITE_IDLE state: " 3391 "invalid packet format for SC command"); 3392 3393 r_write_sc_key = m_cmd_write_data_fifo.read(); 3394 } 3395 // initialize the be field for all words 3396 for(size_t word=0 ; word<m_words ; word++) 3397 { 3398 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3399 else r_write_be[word] = 0x0; 3400 } 3401 3402 if (m_cmd_write_eop_fifo.read()) 3403 { 3404 r_write_fsm = WRITE_DIR_REQ; 3405 } 3406 else 3407 { 3408 r_write_fsm = WRITE_NEXT; 3409 } 3316 { 3317 if (not m_cmd_write_addr_fifo.rok()) break; 3318 3319 // consume a word in the FIFO & write it in the local buffer 3320 cmd_write_fifo_get = true; 3321 size_t index = m_x[(addr_t) (m_cmd_write_addr_fifo.read())]; 3322 3323 r_write_address = (addr_t) (m_cmd_write_addr_fifo.read()); 3324 r_write_word_index = index; 3325 r_write_word_count = 0; 3326 r_write_data[index] = m_cmd_write_data_fifo.read(); 3327 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3328 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3329 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3330 3331 // if SC command, get the SC key 3332 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3333 { 3334 assert(not m_cmd_write_eop_fifo.read() && 3335 "MEMC ERROR in WRITE_IDLE state: " 3336 "invalid packet format for SC command"); 3337 3338 r_write_sc_key = m_cmd_write_data_fifo.read(); 3339 } 3340 3341 // initialize the be field for all words 3342 for (size_t word = 0; word < m_words; word++) 3343 { 3344 if (word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3345 else r_write_be[word] = 0x0; 3346 } 3347 3348 if (m_cmd_write_eop_fifo.read()) 3349 { 3350 r_write_fsm = WRITE_DIR_REQ; 3351 } 3352 else 3353 { 3354 r_write_fsm = WRITE_NEXT; 3355 } 3410 3356 3411 3357 #if DEBUG_MEMC_WRITE 3412 if(m_debug) 3413 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request " 3414 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3415 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 3416 << " / data = " << m_cmd_write_data_fifo.read() 3417 << " / pktid = " << 
m_cmd_write_pktid_fifo.read() 3418 << std::endl; 3419 #endif 3420 break; 3421 } 3422 3423 //////////////// 3358 if (m_debug) 3359 { 3360 std::cout << " <MEMC " << name() << " WRITE_IDLE> Write request " 3361 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 3362 << " / address = " << std::hex << m_cmd_write_addr_fifo.read() 3363 << " / data = " << m_cmd_write_data_fifo.read() 3364 << " / pktid = " << m_cmd_write_pktid_fifo.read() 3365 << std::dec << std::endl; 3366 } 3367 #endif 3368 break; 3369 } 3370 //////////////// 3424 3371 case WRITE_NEXT: // copy next word of a write burst in local buffer 3425 { 3426 if (not m_cmd_write_addr_fifo.rok()) break; 3427 3428 // check that the next word is in the same cache line 3429 assert((m_nline[(addr_t)(r_write_address.read())] == 3430 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) && 3431 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 3432 3433 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3434 bool is_sc = ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC); 3435 3436 // check that SC command has constant address 3437 assert((not is_sc or (index == r_write_word_index)) && 3438 "MEMC ERROR in WRITE_NEXT state: " 3439 "the address must be constant on a SC command"); 3440 3441 // check that SC command has two flits 3442 assert((not is_sc or m_cmd_write_eop_fifo.read()) && 3443 "MEMC ERROR in WRITE_NEXT state: " 3444 "invalid packet format for SC command"); 3445 // consume a word in the FIFO & write it in the local buffer 3446 cmd_write_fifo_get = true; 3447 3448 r_write_be[index] = m_cmd_write_be_fifo.read(); 3449 r_write_data[index] = m_cmd_write_data_fifo.read(); 3450 3451 // the first flit of a SC command is the reservation key and 3452 // therefore it must not be counted as a data to write 3453 if (not is_sc) 3454 { 3455 r_write_word_count = r_write_word_count.read() + 1; 3456 } 3457 3458 if (m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 3372 { 3373 if (not m_cmd_write_addr_fifo.rok()) break; 3374 3375 // check that the next word is in the same cache line 3376 assert((m_nline[(addr_t)(r_write_address.read())] == 3377 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) && 3378 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 3379 3380 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 3381 bool is_sc = ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC); 3382 3383 // check that SC command has constant address 3384 assert((not is_sc or (index == r_write_word_index)) && 3385 "MEMC ERROR in WRITE_NEXT state: " 3386 "the address must be constant on a SC command"); 3387 3388 // check that SC command has two flits 3389 assert((not is_sc or m_cmd_write_eop_fifo.read()) && 3390 "MEMC ERROR in WRITE_NEXT state: " 3391 "invalid packet format for SC command"); 3392 3393 // consume a word in the FIFO & write it in the local buffer 3394 cmd_write_fifo_get = true; 3395 3396 r_write_be[index] = m_cmd_write_be_fifo.read(); 3397 r_write_data[index] = m_cmd_write_data_fifo.read(); 3398 3399 // the first flit of a SC command is the reservation key and 3400 // therefore it must not be counted as a data to write 3401 if (not is_sc) 3402 { 3403 r_write_word_count = r_write_word_count.read() + 1; 3404 } 3405 3406 if (m_cmd_write_eop_fifo.read()) 3407 { 3408 r_write_fsm = WRITE_DIR_REQ; 3409 } 3459 3410 3460 3411 #if DEBUG_MEMC_WRITE 3461 3462 std::cout << " <MEMC " << name()3463 << " WRITE_NEXT> Write another word in local buffer"3464 << std::endl;3465 #endif 3466 break;3467 } 3468 3469 ////////////////////3470 
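// --- Editor's sketch, not part of the changeset -----------------------------
// WRITE_IDLE / WRITE_NEXT above assemble one line-sized buffer from the VCI
// burst: words not present in the burst keep be = 0, so the later cache update
// only touches the received bytes (and for an SC command the first flit is the
// reservation key, not data). Hypothetical stand-alone model: first_word is
// what m_x[] extracts from the address, flit_data / flit_be are the burst
// payloads, data / be are zero-initialized with m_words entries.
static void assemble_write_buffer(size_t first_word,
                                  const std::vector<uint32_t> &flit_data,
                                  const std::vector<uint32_t> &flit_be,
                                  std::vector<uint32_t> &data,
                                  std::vector<uint32_t> &be)
{
    for (size_t i = 0; i < flit_data.size(); i++)
    {
        data[first_word + i] = flit_data[i];  // r_write_data[word]
        be[first_word + i]   = flit_be[i];    // r_write_be[word], other words stay 0
    }
}
// -----------------------------------------------------------------------------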
case WRITE_DIR_REQ:3471 {3472 // Get the lock to the directory3473 // and access the llsc_global_table3474 if (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE) break;3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 r_write_sc_fail= not sc_success;3485 3486 3487 3488 3489 3490 3491 3412 if (m_debug) 3413 { 3414 std::cout << " <MEMC " << name() 3415 << " WRITE_NEXT> Write another word in local buffer" 3416 << std::endl; 3417 } 3418 #endif 3419 break; 3420 } 3421 /////////////////// 3422 case WRITE_DIR_REQ: // Get the lock to the directory 3423 // and access the llsc_global_table 3424 { 3425 if (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE) break; 3426 3427 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3428 { 3429 // test address and key match of the SC command on the 3430 // LL/SC table without removing reservation. The reservation 3431 // will be erased after in this FSM. 3432 bool sc_success = m_llsc_table.check(r_write_address.read(), 3433 r_write_sc_key.read()); 3434 3435 r_write_sc_fail = not sc_success; 3436 3437 if (not sc_success) r_write_fsm = WRITE_RSP; 3438 else r_write_fsm = WRITE_DIR_LOCK; 3439 } 3440 else 3441 { 3442 // write burst 3492 3443 #define L2 soclib::common::uint32_log2 3493 3494 3495 3444 addr_t min = r_write_address.read(); 3445 addr_t max = r_write_address.read() + 3446 (r_write_word_count.read() << L2(vci_param_int::B)); 3496 3447 #undef L2 3497 3448 3498 3499 3500 3501 3449 m_llsc_table.sw(min, max); 3450 3451 r_write_fsm = WRITE_DIR_LOCK; 3452 } 3502 3453 3503 3454 #if DEBUG_MEMC_WRITE 3504 if(m_debug) 3505 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock " 3506 << std::endl; 3507 #endif 3508 break; 3509 } 3510 3511 //////////////////// 3455 if (m_debug) 3456 { 3457 std::cout << " <MEMC " << name() << " WRITE_DIR_REQ> Requesting DIR lock " 3458 << std::endl; 3459 } 3460 #endif 3461 break; 3462 } 3463 //////////////////// 3512 3464 case WRITE_DIR_LOCK: // access directory to check hit/miss 3513 { 3514 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3515 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 3516 size_t way = 0; 3517 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 3518 3519 if(entry.valid) // hit 3520 { 3521 // copy directory entry in local buffer in case of hit 3522 r_write_is_cnt = entry.is_cnt; 3523 r_write_lock = entry.lock; 3524 r_write_tag = entry.tag; 3525 r_write_copy = entry.owner.srcid; 3526 r_write_copy_inst = entry.owner.inst; 3527 r_write_count = entry.count; 3528 r_write_ptr = entry.ptr; 3529 r_write_way = way; 3530 3531 r_write_coherent = entry.cache_coherent; 3532 3533 if((entry.cache_coherent == false) and (entry.count != 0)) 3465 { 3466 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3467 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 3468 3469 size_t way = 0; 3470 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); 3471 3472 if (entry.valid) // hit 3473 { 3474 // copy directory entry in local buffer in case of hit 3475 r_write_is_cnt = entry.is_cnt; 3476 r_write_lock = entry.lock; 3477 r_write_tag = entry.tag; 3478 r_write_copy = entry.owner.srcid; 3479 r_write_copy_inst = entry.owner.inst; 3480 r_write_count = entry.count; 3481 r_write_ptr = entry.ptr; 3482 r_write_way = way; 3483 r_write_coherent = entry.cache_coherent; 3484 3485 if ((entry.cache_coherent == false) and (entry.count != 0)) 3486 { 3487 m_cpt_write_ncc_miss++; 3488 } 3489 3490 if (entry.cache_coherent or (entry.owner.srcid == r_write_srcid.read()) or (entry.count == 
0)) // hit WT 3491 { 3492 if (entry.is_cnt and entry.count) 3534 3493 { 3535 m_cpt_write_ncc_miss++; 3536 } 3537 3538 if (entry.cache_coherent or (entry.owner.srcid == r_write_srcid.read()) or (entry.count == 0)) // hit WT 3539 { 3540 if(entry.is_cnt && entry.count) 3541 { 3542 r_write_fsm = WRITE_BC_DIR_READ; 3543 } 3544 else 3545 { 3546 r_write_fsm = WRITE_DIR_HIT; 3547 } 3494 r_write_fsm = WRITE_BC_DIR_READ; 3548 3495 } 3549 3496 else 3550 3497 { 3551 if (r_write_to_cleanup_req.read())//inval already sent 3552 { 3553 r_write_fsm = WRITE_WAIT; 3554 } 3555 else // hit on a NCC line with a different owner 3556 { 3557 r_write_fsm = WRITE_IVT_LOCK_HIT_WB; 3558 } 3498 r_write_fsm = WRITE_DIR_HIT; 3559 3499 } 3560 3500 } 3561 else // miss 3562 { 3563 r_write_fsm = WRITE_MISS_IVT_LOCK; 3564 } 3501 else 3502 { 3503 if (r_write_to_cleanup_req.read()) //inval already sent 3504 { 3505 r_write_fsm = WRITE_WAIT; 3506 } 3507 else // hit on a NCC line with a different owner 3508 { 3509 r_write_fsm = WRITE_IVT_LOCK_HIT_WB; 3510 } 3511 } 3512 } 3513 else // miss 3514 { 3515 r_write_fsm = WRITE_MISS_IVT_LOCK; 3516 } 3565 3517 3566 3518 #if DEBUG_MEMC_WRITE 3567 if(m_debug) 3568 { 3569 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: " 3570 << " address = " << std::hex << r_write_address.read() 3571 << " / hit = " << std::dec << entry.valid 3572 << " / count = " << entry.count 3573 << " / is_cnt = " << entry.is_cnt ; 3574 if((r_write_pktid.read() & 0x7) == TYPE_SC) 3575 std::cout << " / SC access" << std::endl; 3576 else 3577 std::cout << " / SW access" << std::endl; 3519 if (m_debug) 3520 { 3521 std::cout << " <MEMC " << name() << " WRITE_DIR_LOCK> Check the directory: " 3522 << " address = " << std::hex << r_write_address.read() 3523 << " / hit = " << std::dec << entry.valid 3524 << " / count = " << entry.count 3525 << " / is_cnt = " << entry.is_cnt ; 3526 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3527 std::cout << " / SC access" << std::endl; 3528 else 3529 std::cout << " / SW access" << std::endl; 3530 } 3531 #endif 3532 break; 3533 } 3534 //////////////////// 3535 case WRITE_IVT_LOCK_HIT_WB: 3536 { 3537 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3538 { 3539 3540 size_t index = 0; 3541 bool match_inval; 3542 addr_t nline = m_nline[(addr_t) (r_write_address.read())]; 3543 3544 // if there is a matched updt req, we should wait until it is over. 3545 // Because we need the lastest updt data. 
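// Illustrative condensation (not part of this changeset) of the gate evaluated
// just below: the NCC-to-CC invalidation can only be posted when no inval is
// already registered for this line, the IVT has a free entry, and no request
// to the CC_SEND FSM is still pending; otherwise the FSM goes to WRITE_WAIT
// and retries. IvtView is a stand-in for the m_ivt / r_write_to_cc_send_*
// signals used by the real code.
struct IvtView
{
    bool match_inval;       // m_ivt.search_inval(nline, index)
    bool full;              // m_ivt.is_full()
    bool cc_send_pending;   // any r_write_to_cc_send_*_req still set
};

inline bool can_post_inval(const IvtView &v)
{
    return (not v.match_inval) and (not v.full) and (not v.cc_send_pending);
}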
3546 match_inval = m_ivt.search_inval(nline, index); 3547 3548 assert((r_write_count.read() == 1) and "NCC to CC req without copy"); 3549 3550 if (not match_inval and 3551 not m_ivt.is_full() and 3552 not r_write_to_cc_send_req.read() and 3553 not r_write_to_cc_send_multi_req.read() and 3554 not r_write_to_cc_send_brdcast_req.read()) 3555 { 3556 r_write_to_cc_send_req = true; 3557 r_write_to_cc_send_dest = r_write_copy; 3558 r_write_to_cc_send_nline = nline; 3559 r_write_to_cleanup_req = true; 3560 r_write_to_cleanup_nline = nline; 3561 3562 m_ivt.set(false, // it's an inval transaction 3563 false, // it's not a broadcast 3564 true, // it needs no read response 3565 false, // no acknowledge required 3566 m_cmd_write_srcid_fifo.read(), // never read, used for debug 3567 m_cmd_write_trdid_fifo.read(), // never read, used for debug 3568 m_cmd_write_pktid_fifo.read(), // never read, used for debug 3569 nline, 3570 0x1, //Expect only one answer 3571 index); 3572 } 3573 r_write_fsm = WRITE_WAIT; 3574 #if DEBUG_MEMC_WRITE 3575 if (m_debug) 3576 { 3577 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> get access to the IVT: " 3578 << " Inval requested = " << (not match_inval and not r_write_to_cc_send_req.read()) 3579 << std::endl; 3578 3580 } 3579 3581 #endif 3580 3582 break; 3581 3583 } 3582 //////////////////// 3583 case WRITE_IVT_LOCK_HIT_WB: 3584 { 3585 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3586 { 3587 3588 size_t index = 0; 3589 bool match_inval; 3590 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3591 3592 //if there is a matched updt req, we should wait until it is over. Because 3593 //we need the lastest updt data. 3594 match_inval = m_ivt.search_inval(nline, index); 3595 3596 assert ((r_write_count.read() == 1) and "NCC to CC req without copy"); 3597 3598 if( not match_inval and 3599 not m_ivt.is_full() and 3600 not r_write_to_cc_send_req.read() and 3601 not r_write_to_cc_send_multi_req.read() and 3602 not r_write_to_cc_send_brdcast_req.read() ) 3603 { 3604 r_write_to_cc_send_req = true; 3605 r_write_to_cc_send_dest = r_write_copy; 3606 r_write_to_cc_send_nline = nline; 3607 r_write_to_cleanup_req = true; 3608 r_write_to_cleanup_nline = nline; 3609 3610 m_ivt.set(false, // it's an inval transaction 3611 false, // it's not a broadcast 3612 true, // it needs no read response 3613 false, // no acknowledge required 3614 m_cmd_write_srcid_fifo.read(), //never read, used for debug 3615 m_cmd_write_trdid_fifo.read(), //never read, used for debug 3616 m_cmd_write_pktid_fifo.read(), //never read, used for debug 3584 #if DEBUG_MEMC_WRITE 3585 if (m_debug) 3586 { 3587 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> failed to access to the IVT: " 3588 << std::endl; 3589 } 3590 #endif 3591 break; 3592 } 3593 /////////////////// 3594 case WRITE_DIR_HIT: // update the cache directory with Dirty bit 3595 // and update data cache 3596 { 3597 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3598 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 3599 3600 DirectoryEntry entry; 3601 entry.valid = true; 3602 entry.dirty = true; 3603 entry.tag = r_write_tag.read(); 3604 entry.is_cnt = r_write_is_cnt.read(); 3605 entry.lock = r_write_lock.read(); 3606 entry.owner.srcid = r_write_copy.read(); 3607 entry.owner.inst = r_write_copy_inst.read(); 3608 entry.count = r_write_count.read(); 3609 entry.ptr = r_write_ptr.read(); 3610 entry.cache_coherent = r_write_coherent.read(); 3611 3612 size_t set = m_y[(addr_t) (r_write_address.read())]; 3613 size_t way = 
r_write_way.read(); 3614 3615 // update directory 3616 m_cache_directory.write(set, way, entry); 3617 3618 // owner is true when the the first registered copy is the writer itself 3619 bool owner = ((r_write_copy.read() == r_write_srcid.read()) 3620 and not r_write_copy_inst.read()); 3621 3622 // no_update is true when there is no need for coherence transaction 3623 bool no_update = ((r_write_count.read() == 0) or 3624 (owner and (r_write_count.read() == 1) and 3625 ((r_write_pktid.read() & 0x7) != TYPE_SC))); 3626 3627 // write data in the cache if no coherence transaction 3628 if (no_update) 3629 { 3630 // SC command but zero copies 3631 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3632 { 3633 m_llsc_table.sc(r_write_address.read(), 3634 r_write_sc_key.read()); 3635 } 3636 3637 for (size_t word = 0; word < m_words; word++) 3638 { 3639 m_cache_data.write(way, 3640 set, 3641 word, 3642 r_write_data[word].read(), 3643 r_write_be[word].read()); 3644 } 3645 } 3646 3647 if (owner and not no_update and ((r_write_pktid.read() & 0x7) != TYPE_SC)) 3648 { 3649 r_write_count = r_write_count.read() - 1; 3650 } 3651 3652 if (no_update) // Write transaction completed 3653 { 3654 r_write_fsm = WRITE_RSP; 3655 } 3656 else // coherence update required 3657 { 3658 if (not r_write_to_cc_send_multi_req.read() and 3659 not r_write_to_cc_send_brdcast_req.read() and 3660 not r_write_to_cc_send_req.read()) 3661 { 3662 r_write_fsm = WRITE_UPT_LOCK; 3663 } 3664 else 3665 { 3666 r_write_fsm = WRITE_WAIT; 3667 } 3668 } 3669 3670 #if DEBUG_MEMC_WRITE 3671 if (m_debug) 3672 { 3673 if (no_update) 3674 { 3675 std::cout << " <MEMC " << name() 3676 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 3677 << std::endl; 3678 } 3679 else 3680 { 3681 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 3682 << " is_cnt = " << r_write_is_cnt.read() 3683 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 3684 if (owner) std::cout << " ... 
but the first copy is the writer" << std::endl; 3685 } 3686 } 3687 #endif 3688 break; 3689 } 3690 //////////////////// 3691 case WRITE_UPT_LOCK: // Try to register the update request in UPT 3692 { 3693 if (r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 3694 { 3695 bool wok = false; 3696 size_t index = 0; 3697 size_t srcid = r_write_srcid.read(); 3698 size_t trdid = r_write_trdid.read(); 3699 size_t pktid = r_write_pktid.read(); 3700 addr_t nline = m_nline[(addr_t) (r_write_address.read())]; 3701 size_t nb_copies = r_write_count.read(); 3702 size_t set = m_y[(addr_t) (r_write_address.read())]; 3703 size_t way = r_write_way.read(); 3704 3705 wok = m_upt.set(true, // it's an update transaction 3706 false, // it's not a broadcast 3707 true, // response required 3708 false, // no acknowledge required 3709 srcid, 3710 trdid, 3711 pktid, 3617 3712 nline, 3618 0x1, //Expect only one answer3713 nb_copies, 3619 3714 index); 3620 } 3621 r_write_fsm = WRITE_WAIT; 3622 #if DEBUG_MEMC_WRITE 3623 if(m_debug) 3624 { 3625 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> get access to the IVT: " 3626 << " Inval requested = " << (not match_inval and not r_write_to_cc_send_req.read()) 3627 << std::endl; 3628 } 3629 #endif 3630 break; 3631 } 3632 #if DEBUG_MEMC_WRITE 3633 if(m_debug) 3634 { 3635 std::cout << " <MEMC " << name() << " WRITE_IVT_LOCK_HIT_WB> failed to access to the IVT: " 3636 << std::endl; 3637 } 3638 #endif 3639 break; 3640 } 3641 3642 /////////////////// 3643 case WRITE_DIR_HIT: 3644 { 3645 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3646 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 3647 3648 // update the cache directory 3649 // update directory with Dirty bit 3650 DirectoryEntry entry; 3651 entry.valid = true; 3652 entry.cache_coherent = r_write_coherent.read(); 3653 entry.dirty = true; 3654 entry.tag = r_write_tag.read(); 3655 entry.is_cnt = r_write_is_cnt.read(); 3656 entry.lock = r_write_lock.read(); 3657 entry.owner.srcid = r_write_copy.read(); 3658 entry.owner.inst = r_write_copy_inst.read(); 3659 entry.count = r_write_count.read(); 3660 entry.ptr = r_write_ptr.read(); 3661 3662 size_t set = m_y[(addr_t)(r_write_address.read())]; 3663 size_t way = r_write_way.read(); 3664 3665 // update directory 3666 m_cache_directory.write(set, way, entry); 3667 3668 // owner is true when the the first registered copy is the writer itself 3669 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 3670 ) and not r_write_copy_inst.read()); 3671 3672 // no_update is true when there is no need for coherence transaction 3673 bool no_update = ( (r_write_count.read() == 0) or 3674 (owner and (r_write_count.read() == 1) and 3675 ((r_write_pktid.read() & 0x7) != TYPE_SC))); 3676 3677 // write data in the cache if no coherence transaction 3678 if(no_update) 3679 { 3680 // SC command but zero copies 3715 3716 if (wok) // write data in cache 3717 { 3681 3718 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3682 3719 { … … 3685 3722 } 3686 3723 3687 for (size_t word=0 ; word<m_words; word++)3724 for (size_t word = 0; word < m_words; word++) 3688 3725 { 3689 m_cache_data.write(way, set, word, r_write_data[word].read(), r_write_be[word].read()); 3690 3726 m_cache_data.write(way, 3727 set, 3728 word, 3729 r_write_data[word].read(), 3730 r_write_be[word].read()); 3691 3731 } 3692 3732 } 3693 3733 3694 if (owner and not no_update and ((r_write_pktid.read() & 0x7) != TYPE_SC)) 3695 { 3696 r_write_count = r_write_count.read() - 1; 3697 } 3698 3699 if(no_update) 3700 // Write 
transaction completed 3701 { 3702 r_write_fsm = WRITE_RSP; 3734 #if DEBUG_MEMC_WRITE 3735 if (m_debug and wok) 3736 { 3737 if (wok) 3738 { 3739 std::cout << " <MEMC " << name() 3740 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3741 << " nb_copies = " << r_write_count.read() << std::endl; 3742 } 3743 } 3744 #endif 3745 r_write_upt_index = index; 3746 // releases the lock protecting UPT and the DIR if no entry... 3747 if (wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3748 else r_write_fsm = WRITE_WAIT; 3749 } 3750 break; 3751 } 3752 3753 ///////////////////////// 3754 case WRITE_UPT_HEAP_LOCK: // get access to heap 3755 { 3756 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 3757 { 3758 3759 #if DEBUG_MEMC_WRITE 3760 if (m_debug) 3761 { 3762 std::cout << " <MEMC " << name() 3763 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 3764 } 3765 #endif 3766 r_write_fsm = WRITE_UPT_REQ; 3767 } 3768 break; 3769 } 3770 3771 ////////////////// 3772 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 3773 // and write the first copy in the FIFO 3774 // send the request if only one copy 3775 { 3776 assert(not r_write_to_cc_send_multi_req.read() and 3777 not r_write_to_cc_send_brdcast_req.read() and 3778 not r_write_to_cc_send_req.read() and 3779 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 3780 "transaction in WRITE_UPT_REQ state"); 3781 3782 r_write_to_cc_send_brdcast_req = false; 3783 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3784 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3785 r_write_to_cc_send_index = r_write_word_index.read(); 3786 r_write_to_cc_send_count = r_write_word_count.read(); 3787 3788 for (size_t i = 0; i < m_words; i++) 3789 { 3790 r_write_to_cc_send_be[i] = r_write_be[i].read(); 3791 } 3792 3793 size_t min = r_write_word_index.read(); 3794 size_t max = r_write_word_index.read() + r_write_word_count.read(); 3795 for (size_t i = min; i <= max; i++) 3796 { 3797 r_write_to_cc_send_data[i] = r_write_data[i]; 3798 } 3799 3800 if ((r_write_copy.read() != r_write_srcid.read()) or 3801 ((r_write_pktid.read() & 0x7) == TYPE_SC) or 3802 r_write_copy_inst.read()) 3803 { 3804 // put the first srcid in the fifo 3805 write_to_cc_send_fifo_put = true; 3806 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3807 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3808 if (r_write_count.read() == 1) 3809 { 3810 r_write_fsm = WRITE_IDLE; 3811 r_write_to_cc_send_multi_req = true; 3703 3812 } 3704 3813 else 3705 // coherence update required 3706 { 3707 if( not r_write_to_cc_send_multi_req.read() and 3708 not r_write_to_cc_send_brdcast_req.read() and 3709 not r_write_to_cc_send_req.read() ) 3814 { 3815 r_write_fsm = WRITE_UPT_NEXT; 3816 r_write_to_dec = false; 3817 3818 } 3819 } 3820 else 3821 { 3822 r_write_fsm = WRITE_UPT_NEXT; 3823 r_write_to_dec = false; 3824 } 3825 3826 #if DEBUG_MEMC_WRITE 3827 if (m_debug) 3828 { 3829 std::cout 3830 << " <MEMC " << name() 3831 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3832 << " / srcid = " << std::dec << r_write_copy.read() 3833 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3834 3835 if (r_write_count.read() == 1) 3836 std::cout << " ... and this is the last" << std::endl; 3837 } 3838 #endif 3839 break; 3840 } 3841 3842 /////////////////// 3843 case WRITE_UPT_NEXT: 3844 { 3845 // continue the multi-update request to CC_SEND fsm 3846 // when there is copies in the heap. 
3847 // if one copy in the heap is the writer itself 3848 // the corresponding SRCID should not be written in the fifo, 3849 // but the UPT counter must be decremented. 3850 // As this decrement is done in the WRITE_UPT_DEC state, 3851 // after the last copy has been found, the decrement request 3852 // must be registered in the r_write_to_dec flip-flop. 3853 3854 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3855 3856 bool dec_upt_counter; 3857 3858 // put the next srcid in the fifo 3859 if ((entry.owner.srcid != r_write_srcid.read()) or 3860 ((r_write_pktid.read() & 0x7) == TYPE_SC) or 3861 entry.owner.inst) 3862 { 3863 dec_upt_counter = false; 3864 write_to_cc_send_fifo_put = true; 3865 write_to_cc_send_fifo_inst = entry.owner.inst; 3866 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3867 3868 #if DEBUG_MEMC_WRITE 3869 if (m_debug) 3870 { 3871 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3872 << " / heap_index = " << std::dec << r_write_ptr.read() 3873 << " / srcid = " << std::dec << r_write_copy.read() 3874 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3875 if (entry.next == r_write_ptr.read()) 3876 std::cout << " ... and this is the last" << std::endl; 3877 } 3878 #endif 3879 } 3880 else // the UPT counter must be decremented 3881 { 3882 dec_upt_counter = true; 3883 #if DEBUG_MEMC_WRITE 3884 if (m_debug) 3885 { 3886 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3887 << " / heap_index = " << std::dec << r_write_ptr.read() 3888 << " / srcid = " << std::dec << r_write_copy.read() 3889 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3890 if (entry.next == r_write_ptr.read()) 3891 std::cout << " ... and this is the last" << std::endl; 3892 } 3893 #endif 3894 } 3895 3896 // register the possible UPT decrement request 3897 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3898 3899 if (not m_write_to_cc_send_inst_fifo.wok()) 3900 { 3901 std::cout << "*** VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3902 << "The write_to_cc_send_fifo should not be full" << std::endl 3903 << "as the depth should be larger than the max number of copies" << std::endl; 3904 exit(0); 3905 } 3906 3907 r_write_ptr = entry.next; 3908 3909 if (entry.next == r_write_ptr.read()) // last copy 3910 { 3911 r_write_to_cc_send_multi_req = true; 3912 if (r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3913 else r_write_fsm = WRITE_IDLE; 3914 } 3915 break; 3916 } 3917 3918 ////////////////// 3919 case WRITE_UPT_DEC: 3920 { 3921 // If the initial writer has a copy, it should not 3922 // receive an update request, but the counter in the 3923 // update table must be decremented by the MULTI_ACK FSM. 
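// Illustrative sketch only (not part of this changeset): the walk over the
// copy list performed by WRITE_UPT_NEXT, assuming a simple vector-based heap
// in which the last entry points to itself. Copies matching the writer are
// not pushed to the CC_SEND fifo but are counted, which is exactly why the
// UPT counter has to be decremented afterwards (WRITE_UPT_DEC / MULTI_ACK).
// CopyEntry and copies_to_update are names of this sketch only; the SC
// special case (the writer is updated anyway) is left out.
#include <cstddef>
#include <vector>

struct CopyEntry
{
    std::size_t srcid;
    bool        inst;
    std::size_t next;   // index of the next copy; equals its own index on the last one
};

// Returns the copies that must receive an update; 'skipped' counts the copies
// matching the writer, i.e. the number of pending UPT decrements.
inline std::vector<CopyEntry> copies_to_update(const std::vector<CopyEntry> &heap,
                                               std::size_t head,
                                               std::size_t writer_srcid,
                                               std::size_t &skipped)
{
    std::vector<CopyEntry> dest;
    skipped = 0;
    std::size_t ptr = head;
    while (true)
    {
        const CopyEntry &entry = heap[ptr];
        if ((entry.srcid != writer_srcid) or entry.inst) dest.push_back(entry);
        else                                             skipped++;
        if (entry.next == ptr) break;   // last copy reached
        ptr = entry.next;
    }
    return dest;
}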
3924 3925 if (!r_write_to_multi_ack_req.read()) 3926 { 3927 r_write_to_multi_ack_req = true; 3928 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3929 r_write_fsm = WRITE_IDLE; 3930 } 3931 break; 3932 } 3933 3934 /////////////// 3935 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 3936 // In order to increase the Write requests throughput, 3937 // we don't wait to return in the IDLE state to consume 3938 // a new request in the write FIFO 3939 { 3940 if (not r_write_to_tgt_rsp_req.read()) 3941 { 3942 // post the request to TGT_RSP_FSM 3943 r_write_to_tgt_rsp_req = true; 3944 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3945 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3946 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3947 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3948 3949 // try to get a new write request from the FIFO 3950 if (not m_cmd_write_addr_fifo.rok()) 3951 { 3952 r_write_fsm = WRITE_IDLE; 3953 } 3954 else 3955 { 3956 // consume a word in the FIFO & write it in the local buffer 3957 cmd_write_fifo_get = true; 3958 size_t index = m_x[(addr_t) (m_cmd_write_addr_fifo.read())]; 3959 3960 r_write_address = (addr_t) (m_cmd_write_addr_fifo.read()); 3961 r_write_word_index = index; 3962 r_write_word_count = 0; 3963 r_write_data[index] = m_cmd_write_data_fifo.read(); 3964 r_write_srcid = m_cmd_write_srcid_fifo.read(); 3965 r_write_trdid = m_cmd_write_trdid_fifo.read(); 3966 r_write_pktid = m_cmd_write_pktid_fifo.read(); 3967 3968 // if SC command, get the SC key 3969 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 3710 3970 { 3711 r_write_fsm = WRITE_UPT_LOCK; 3971 assert(not m_cmd_write_eop_fifo.read() && 3972 "MEMC ERROR in WRITE_RSP state: " 3973 "invalid packet format for SC command"); 3974 3975 r_write_sc_key = m_cmd_write_data_fifo.read(); 3976 } 3977 3978 // initialize the be field for all words 3979 for (size_t word = 0; word < m_words; word++) 3980 { 3981 if (word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 3982 else r_write_be[word] = 0x0; 3983 } 3984 3985 if (m_cmd_write_eop_fifo.read()) 3986 { 3987 r_write_fsm = WRITE_DIR_REQ; 3712 3988 } 3713 3989 else 3714 3990 { 3715 r_write_fsm = WRITE_ WAIT;3991 r_write_fsm = WRITE_NEXT; 3716 3992 } 3717 3993 } 3718 3994 3719 3995 #if DEBUG_MEMC_WRITE 3720 if(m_debug) 3721 { 3722 if(no_update) 3996 if (m_debug) 3997 { 3998 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 3999 << " : rsrcid = " << std::hex << r_write_srcid.read() 4000 << " : rpktid = " << std::hex << r_write_pktid.read() 4001 << " : sc_fail= " << std::hex << r_write_sc_fail.read() 4002 << std::endl; 4003 if (m_cmd_write_addr_fifo.rok()) 3723 4004 { 3724 std::cout << " <MEMC " << name() 3725 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 4005 std::cout << " New Write request: " 4006 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 4007 << " / address = " << m_cmd_write_addr_fifo.read() 4008 << " / data = " << m_cmd_write_data_fifo.read() 4009 << " / pktid = " << m_cmd_write_pktid_fifo.read() 3726 4010 << std::endl; 3727 4011 } 3728 else 3729 { 3730 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 3731 << " is_cnt = " << r_write_is_cnt.read() 3732 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 3733 if(owner) std::cout << " ... 
but the first copy is the writer" << std::endl; 3734 } 3735 } 3736 #endif 3737 break; 3738 } 3739 //////////////////// 3740 case WRITE_UPT_LOCK: // Try to register the update request in UPT 3741 { 3742 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 3743 { 3744 bool wok = false; 3745 size_t index = 0; 3746 size_t srcid = r_write_srcid.read(); 3747 size_t trdid = r_write_trdid.read(); 3748 size_t pktid = r_write_pktid.read(); 3749 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3750 size_t nb_copies = r_write_count.read(); 3751 size_t set = m_y[(addr_t)(r_write_address.read())]; 3752 size_t way = r_write_way.read(); 3753 3754 3755 wok = m_upt.set(true, // it's an update transaction 3756 false, // it's not a broadcast 3757 true, // response required 3758 false, // no acknowledge required 3759 srcid, 3760 trdid, 3761 pktid, 3762 nline, 3763 nb_copies, 3764 index); 3765 if(wok) // write data in cache 3766 { 3767 3768 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 3769 { 3770 m_llsc_table.sc(r_write_address.read(), 3771 r_write_sc_key.read()); 3772 } 3773 3774 for(size_t word=0 ; word<m_words ; word++) 3775 { 3776 m_cache_data.write(way, 3777 set, 3778 word, 3779 r_write_data[word].read(), 3780 r_write_be[word].read()); 3781 3782 } 3783 } 3784 3785 #if DEBUG_MEMC_WRITE 3786 if(m_debug and wok) 3787 { 3788 if(wok) 3789 { 3790 std::cout << " <MEMC " << name() 3791 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 3792 << " nb_copies = " << r_write_count.read() << std::endl; 3793 } 3794 } 3795 #endif 3796 r_write_upt_index = index; 3797 // releases the lock protecting UPT and the DIR if no entry... 3798 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 3799 else r_write_fsm = WRITE_WAIT; 3800 } 3801 break; 3802 } 3803 3804 ///////////////////////// 3805 case WRITE_UPT_HEAP_LOCK: // get access to heap 3806 { 3807 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE) 3808 { 3809 3810 #if DEBUG_MEMC_WRITE 3811 if(m_debug) 3812 std::cout << " <MEMC " << name() 3813 << " WRITE_UPT_HEAP_LOCK> Get acces to the HEAP" << std::endl; 3814 #endif 3815 r_write_fsm = WRITE_UPT_REQ; 3816 } 3817 break; 3818 } 3819 3820 ////////////////// 3821 case WRITE_UPT_REQ: // prepare the coherence transaction for the CC_SEND FSM 3822 // and write the first copy in the FIFO 3823 // send the request if only one copy 3824 { 3825 assert( not r_write_to_cc_send_multi_req.read() and 3826 not r_write_to_cc_send_brdcast_req.read() and 3827 not r_write_to_cc_send_req.read() and 3828 "Error in VCI_MEM_CACHE : pending multicast or broadcast\n" 3829 "transaction in WRITE_UPT_REQ state" 3830 ); 3831 3832 3833 r_write_to_cc_send_brdcast_req = false; 3834 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3835 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3836 r_write_to_cc_send_index = r_write_word_index.read(); 3837 r_write_to_cc_send_count = r_write_word_count.read(); 3838 3839 for(size_t i=0; i<m_words ; i++) r_write_to_cc_send_be[i]=r_write_be[i].read(); 3840 3841 size_t min = r_write_word_index.read(); 3842 size_t max = r_write_word_index.read() + r_write_word_count.read(); 3843 for(size_t i=min ; i<=max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 3844 3845 if ((r_write_copy.read() != r_write_srcid.read()) or 3846 ((r_write_pktid.read() & 0x7) == TYPE_SC) or 3847 r_write_copy_inst.read()) 3848 { 3849 // put the first srcid in the fifo 3850 write_to_cc_send_fifo_put = true; 3851 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 3852 write_to_cc_send_fifo_srcid = r_write_copy.read(); 3853 
if(r_write_count.read() == 1) 3854 { 3855 r_write_fsm = WRITE_IDLE; 3856 r_write_to_cc_send_multi_req = true; 3857 } 3858 else 3859 { 3860 r_write_fsm = WRITE_UPT_NEXT; 3861 r_write_to_dec = false; 3862 3863 } 4012 } 4013 #endif 4014 } 4015 break; 4016 } 4017 ///////////////////////// RWT 4018 case WRITE_MISS_IVT_LOCK: 4019 { 4020 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 4021 { 4022 size_t index; 4023 if (m_ivt.search_inval(m_nline[(addr_t) (r_write_address.read())], index)) 4024 { 4025 r_write_fsm = WRITE_WAIT; 3864 4026 } 3865 4027 else 3866 4028 { 3867 r_write_fsm = WRITE_UPT_NEXT; 3868 r_write_to_dec = false; 3869 } 3870 3871 #if DEBUG_MEMC_WRITE 3872 if(m_debug) 3873 { 3874 std::cout 3875 << " <MEMC " << name() 3876 << " WRITE_UPT_REQ> Post first request to CC_SEND FSM" 3877 << " / srcid = " << std::dec << r_write_copy.read() 3878 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3879 3880 if(r_write_count.read() == 1) 3881 std::cout << " ... and this is the last" << std::endl; 3882 } 3883 #endif 3884 break; 3885 } 3886 3887 /////////////////// 3888 case WRITE_UPT_NEXT: 3889 { 3890 // continue the multi-update request to CC_SEND fsm 3891 // when there is copies in the heap. 3892 // if one copy in the heap is the writer itself 3893 // the corresponding SRCID should not be written in the fifo, 3894 // but the UPT counter must be decremented. 3895 // As this decrement is done in the WRITE_UPT_DEC state, 3896 // after the last copy has been found, the decrement request 3897 // must be registered in the r_write_to_dec flip-flop. 3898 3899 HeapEntry entry = m_heap.read(r_write_ptr.read()); 3900 3901 bool dec_upt_counter; 3902 3903 // put the next srcid in the fifo 3904 if ((entry.owner.srcid != r_write_srcid.read()) or 3905 ((r_write_pktid.read() & 0x7) == TYPE_SC) or 3906 entry.owner.inst) 3907 { 3908 dec_upt_counter = false; 3909 write_to_cc_send_fifo_put = true; 3910 write_to_cc_send_fifo_inst = entry.owner.inst; 3911 write_to_cc_send_fifo_srcid = entry.owner.srcid; 3912 3913 #if DEBUG_MEMC_WRITE 3914 if(m_debug) 3915 { 3916 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Post another request to CC_SEND FSM" 3917 << " / heap_index = " << std::dec << r_write_ptr.read() 3918 << " / srcid = " << std::dec << r_write_copy.read() 3919 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3920 if(entry.next == r_write_ptr.read()) 3921 std::cout << " ... and this is the last" << std::endl; 3922 } 3923 #endif 3924 } 3925 else // the UPT counter must be decremented 3926 { 3927 dec_upt_counter = true; 3928 3929 #if DEBUG_MEMC_WRITE 3930 if(m_debug) 3931 { 3932 std::cout << " <MEMC " << name() << " WRITE_UPT_NEXT> Skip one entry in heap matching the writer" 3933 << " / heap_index = " << std::dec << r_write_ptr.read() 3934 << " / srcid = " << std::dec << r_write_copy.read() 3935 << " / inst = " << std::dec << r_write_copy_inst.read() << std::endl; 3936 if(entry.next == r_write_ptr.read()) 3937 std::cout << " ... 
and this is the last" << std::endl; 3938 } 3939 #endif 3940 } 3941 3942 // register the possible UPT decrement request 3943 r_write_to_dec = dec_upt_counter or r_write_to_dec.read(); 3944 3945 if(not m_write_to_cc_send_inst_fifo.wok()) 3946 { 3947 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_UPT_NEXT state" << std::endl 3948 << "The write_to_cc_send_fifo should not be full" << std::endl 3949 << "as the depth should be larger than the max number of copies" << std::endl; 3950 exit(0); 3951 } 3952 3953 r_write_ptr = entry.next; 3954 3955 if(entry.next == r_write_ptr.read()) // last copy 3956 { 3957 r_write_to_cc_send_multi_req = true; 3958 if(r_write_to_dec.read() or dec_upt_counter) r_write_fsm = WRITE_UPT_DEC; 3959 else r_write_fsm = WRITE_IDLE; 3960 } 3961 break; 3962 } 3963 3964 ////////////////// 3965 case WRITE_UPT_DEC: 3966 { 3967 // If the initial writer has a copy, it should not 3968 // receive an update request, but the counter in the 3969 // update table must be decremented by the MULTI_ACK FSM. 3970 3971 if(!r_write_to_multi_ack_req.read()) 3972 { 3973 r_write_to_multi_ack_req = true; 3974 r_write_to_multi_ack_upt_index = r_write_upt_index.read(); 3975 r_write_fsm = WRITE_IDLE; 3976 } 3977 break; 3978 } 3979 3980 /////////////// 3981 case WRITE_RSP: 3982 { 3983 // Post a request to TGT_RSP FSM to acknowledge the write 3984 // In order to increase the Write requests throughput, 3985 // we don't wait to return in the IDLE state to consume 3986 // a new request in the write FIFO 3987 3988 if (not r_write_to_tgt_rsp_req.read()) 3989 { 3990 // post the request to TGT_RSP_FSM 3991 r_write_to_tgt_rsp_req = true; 3992 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 3993 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 3994 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 3995 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 3996 3997 // try to get a new write request from the FIFO 3998 if (not m_cmd_write_addr_fifo.rok()) 3999 { 4000 r_write_fsm = WRITE_IDLE; 4001 } 4002 else 4003 { 4004 // consume a word in the FIFO & write it in the local buffer 4005 cmd_write_fifo_get = true; 4006 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 4007 4008 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 4009 r_write_word_index = index; 4010 r_write_word_count = 0; 4011 r_write_data[index] = m_cmd_write_data_fifo.read(); 4012 r_write_srcid = m_cmd_write_srcid_fifo.read(); 4013 r_write_trdid = m_cmd_write_trdid_fifo.read(); 4014 r_write_pktid = m_cmd_write_pktid_fifo.read(); 4015 4016 // if SC command, get the SC key 4017 if ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 4018 { 4019 assert( not m_cmd_write_eop_fifo.read() && 4020 "MEMC ERROR in WRITE_RSP state: " 4021 "invalid packet format for SC command"); 4022 4023 r_write_sc_key = m_cmd_write_data_fifo.read(); 4024 } 4025 4026 // initialize the be field for all words 4027 for(size_t word=0 ; word<m_words ; word++) 4028 { 4029 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 4030 else r_write_be[word] = 0x0; 4031 } 4032 4033 if( m_cmd_write_eop_fifo.read()) 4034 { 4035 r_write_fsm = WRITE_DIR_REQ; 4036 } 4037 else 4038 { 4039 r_write_fsm = WRITE_NEXT; 4040 } 4041 } 4042 4043 #if DEBUG_MEMC_WRITE 4044 if(m_debug) 4045 { 4046 std::cout << " <MEMC " << name() << " WRITE_RSP> Post a request to TGT_RSP FSM" 4047 << " : rsrcid = " << std::hex << r_write_srcid.read() 4048 << " : rpktid = " << std::hex << r_write_pktid.read() 4049 << " : sc_fail= " << std::hex << r_write_sc_fail.read() 4050 << 
std::endl; 4051 if(m_cmd_write_addr_fifo.rok()) 4052 { 4053 std::cout << " New Write request: " 4054 << " srcid = " << std::hex << m_cmd_write_srcid_fifo.read() 4055 << " / address = " << m_cmd_write_addr_fifo.read() 4056 << " / data = " << m_cmd_write_data_fifo.read() 4057 << " / pktid = " << m_cmd_write_pktid_fifo.read() 4058 << std::endl; 4059 } 4060 } 4061 #endif 4062 } 4063 break; 4064 } 4065 ///////////////////////// RWT 4066 case WRITE_MISS_IVT_LOCK: 4067 { 4068 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 4069 { 4070 size_t index; 4071 if(m_ivt.search_inval(m_nline[(addr_t)(r_write_address.read())], index)) 4072 { 4073 r_write_fsm = WRITE_WAIT; 4074 } 4075 else 4076 { 4077 r_write_fsm = WRITE_MISS_TRT_LOCK; 4078 } 4079 } 4080 break; 4081 } 4082 4083 ///////////////////////// 4029 r_write_fsm = WRITE_MISS_TRT_LOCK; 4030 } 4031 } 4032 break; 4033 } 4034 4035 ///////////////////////// 4084 4036 case WRITE_MISS_TRT_LOCK: // Miss : check Transaction Table 4085 { 4086 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 4087 { 4088 4089 #if DEBUG_MEMC_WRITE 4090 if(m_debug) 4091 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 4092 #endif 4093 size_t hit_index = 0; 4094 size_t wok_index = 0; 4095 addr_t addr = (addr_t) r_write_address.read(); 4096 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 4097 bool hit_write = m_trt.hit_write(m_nline[addr]); 4098 bool wok = not m_trt.full(wok_index); 4099 4100 // wait an empty entry in TRT 4101 if(not hit_read and (not wok or hit_write)) 4102 { 4103 r_write_fsm = WRITE_WAIT; 4104 m_cpt_trt_full++; 4105 4106 break; 4107 } 4108 4109 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 4110 { 4111 m_llsc_table.sc(r_write_address.read(), 4112 r_write_sc_key.read()); 4113 } 4114 4115 // register the modified data in TRT 4116 if (hit_read) 4117 { 4118 r_write_trt_index = hit_index; 4119 r_write_fsm = WRITE_MISS_TRT_DATA; 4120 m_cpt_write_miss++; 4121 break; 4122 } 4123 // set a new entry in TRT 4124 if (wok and not hit_write) 4125 { 4126 r_write_trt_index = wok_index; 4127 r_write_fsm = WRITE_MISS_TRT_SET; 4128 m_cpt_write_miss++; 4129 break; 4130 } 4131 4132 assert(false && "VCI_MEM_CACHE ERROR: this part must not be reached"); 4133 } 4134 break; 4135 } 4136 4137 //////////////// 4138 case WRITE_WAIT: // release the locks protecting the shared ressources 4139 { 4140 4141 #if DEBUG_MEMC_WRITE 4142 if(m_debug) 4143 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 4144 #endif 4145 r_write_fsm = WRITE_DIR_REQ; 4146 break; 4147 } 4148 4149 //////////////////////// 4150 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 4151 { 4152 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 4153 { 4154 std::vector<be_t> be_vector; 4155 std::vector<data_t> data_vector; 4156 be_vector.clear(); 4157 data_vector.clear(); 4158 for(size_t i=0; i<m_words; i++) 4159 { 4160 be_vector.push_back(r_write_be[i]); 4161 data_vector.push_back(r_write_data[i]); 4162 } 4163 m_trt.set(r_write_trt_index.read(), 4164 true, // read request to XRAM 4165 m_nline[(addr_t)(r_write_address.read())], 4166 r_write_srcid.read(), 4167 r_write_trdid.read(), 4168 r_write_pktid.read(), 4169 false, // not a processor read 4170 0, // not a single word 4171 0, // word index 4172 be_vector, 4173 data_vector); 4174 r_write_fsm = WRITE_MISS_XRAM_REQ; 4175 4176 #if DEBUG_MEMC_WRITE 4177 if(m_debug) 4178 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 4179 
#endif 4180 } 4181 break; 4182 } 4183 4184 ///////////////////////// 4185 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 4186 { 4187 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 4188 { 4189 std::vector<be_t> be_vector; 4190 std::vector<data_t> data_vector; 4191 be_vector.clear(); 4192 data_vector.clear(); 4193 for(size_t i=0; i<m_words; i++) 4194 { 4195 be_vector.push_back(r_write_be[i]); 4196 data_vector.push_back(r_write_data[i]); 4197 } 4198 m_trt.write_data_mask(r_write_trt_index.read(), 4199 be_vector, 4200 data_vector); 4201 r_write_fsm = WRITE_RSP; 4202 4203 #if DEBUG_MEMC_WRITE 4204 if(m_debug) 4205 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 4206 #endif 4207 } 4208 break; 4209 } 4210 4211 ///////////////////////// 4212 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 4213 { 4214 if(not r_write_to_ixr_cmd_req.read()) 4215 { 4216 r_write_to_ixr_cmd_req = true; 4217 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 4218 r_write_fsm = WRITE_RSP; 4219 4220 #if DEBUG_MEMC_WRITE 4221 if(m_debug) 4222 std::cout << " <MEMC " << name() 4223 << " WRITE_MISS_XRAM_REQ> Post a GET request to the" 4224 << " IXR_CMD FSM" << std::endl; 4225 #endif 4226 } 4227 break; 4228 } 4229 4230 /////////////////////// 4231 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 4232 // the cache line must be erased in mem-cache, and written 4233 // into XRAM. 4234 { 4235 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 4236 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 4237 4238 m_cpt_write_broadcast++; 4239 4240 // write enable signal for data buffer. 4241 r_write_bc_data_we = true; 4242 4243 r_write_fsm = WRITE_BC_TRT_LOCK; 4037 { 4038 if (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 4039 { 4244 4040 4245 4041 #if DEBUG_MEMC_WRITE 4246 4042 if (m_debug) 4247 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 4248 << " Read the cache to complete local buffer" << std::endl; 4249 #endif 4250 break; 4251 } 4252 /////////////////////// 4253 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 4254 { 4255 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 4256 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 4257 4258 // We read the cache and complete the buffer. As the DATA cache uses a 4259 // synchronous RAM, the read DATA request has been performed in the 4260 // WRITE_BC_DIR_READ state but the data is available in this state. 
4261 if (r_write_bc_data_we.read()) 4262 { 4263 size_t set = m_y[(addr_t)(r_write_address.read())]; 4264 size_t way = r_write_way.read(); 4265 for(size_t word=0 ; word<m_words ; word++) 4266 { 4267 data_t mask = 0; 4268 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 4269 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 4270 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 4271 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 4272 4273 // complete only if mask is not null (for energy consumption) 4274 r_write_data[word] = 4275 (r_write_data[word].read() & mask) | 4276 (m_cache_data.read(way, set, word) & ~mask); 4277 } 4278 #if DEBUG_MEMC_WRITE 4279 if(m_debug) 4280 std::cout 4281 << " <MEMC " << name() 4282 << " WRITE_BC_TRT_LOCK> Complete data buffer" << std::endl; 4283 #endif 4284 } 4285 4286 if (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) 4287 { 4288 // if we loop in this state, the data does not need to be 4289 // rewritten (for energy consuption) 4290 r_write_bc_data_we = false; 4043 { 4044 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_LOCK> Check the TRT" << std::endl; 4045 } 4046 #endif 4047 size_t hit_index = 0; 4048 size_t wok_index = 0; 4049 addr_t addr = (addr_t) r_write_address.read(); 4050 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 4051 bool hit_write = m_trt.hit_write(m_nline[addr]); 4052 bool wok = not m_trt.full(wok_index); 4053 4054 // wait an empty entry in TRT 4055 if (not hit_read and (not wok or hit_write)) 4056 { 4057 r_write_fsm = WRITE_WAIT; 4058 m_cpt_trt_full++; 4291 4059 break; 4292 4060 } 4293 4294 size_t wok_index = 0;4295 bool wok = not m_trt.full(wok_index);4296 if(wok) // set a new entry in TRT4297 {4298 r_write_trt_index = wok_index;4299 r_write_fsm = WRITE_BC_IVT_LOCK;4300 }4301 else // wait an empty entry in TRT4302 {4303 r_write_fsm = WRITE_WAIT;4304 }4305 4306 #if DEBUG_MEMC_WRITE4307 if(m_debug)4308 std::cout << " <MEMC " << name()4309 << " WRITE_BC_TRT_LOCK> Check TRT : wok = " << wok4310 << " / index = " << wok_index << std::endl;4311 #endif4312 break;4313 }4314 4315 //////////////////////4316 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT4317 {4318 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and4319 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation");4320 4321 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and4322 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation");4323 4324 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE)4325 {4326 bool wok = false;4327 size_t index = 0;4328 size_t srcid = r_write_srcid.read();4329 size_t trdid = r_write_trdid.read();4330 size_t pktid = r_write_pktid.read();4331 addr_t nline = m_nline[(addr_t)(r_write_address.read())];4332 size_t nb_copies = r_write_count.read();4333 4334 wok = m_ivt.set(false, // it's an inval transaction4335 true, // it's a broadcast4336 true, // response required4337 false, // no acknowledge required4338 srcid,4339 trdid,4340 pktid,4341 nline,4342 nb_copies,4343 index);4344 #if DEBUG_MEMC_WRITE4345 if( m_debug and wok )4346 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT"4347 << " / nb_copies = " << r_write_count.read() << std::endl;4348 #endif4349 r_write_upt_index = index;4350 4351 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL;4352 else r_write_fsm = WRITE_WAIT;4353 }4354 break;4355 }4356 4357 ////////////////////////4358 case WRITE_BC_DIR_INVAL:4359 {4360 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and4361 "MEMC ERROR in WRITE_BC_DIR_INVAL 
state: Bad DIR allocation");4362 4363 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and4364 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation");4365 4366 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and4367 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation");4368 4369 // register PUT request in TRT4370 std::vector<data_t> data_vector;4371 data_vector.clear();4372 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read());4373 m_trt.set( r_write_trt_index.read(),4374 false, // PUT request4375 m_nline[(addr_t)(r_write_address.read())],4376 0, // unused4377 0, // unused4378 0, // unused4379 false, // not a processor read4380 0, // unused4381 0, // unused4382 std::vector<be_t> (m_words,0),4383 data_vector );4384 4385 // invalidate directory entry4386 DirectoryEntry entry;4387 entry.valid = false;4388 entry.cache_coherent= false;4389 entry.dirty = false;4390 entry.tag = 0;4391 entry.is_cnt = false;4392 entry.lock = false;4393 entry.owner.srcid = 0;4394 entry.owner.inst = false;4395 entry.ptr = 0;4396 entry.count = 0;4397 size_t set = m_y[(addr_t)(r_write_address.read())];4398 size_t way = r_write_way.read();4399 4400 m_cache_directory.write(set, way, entry);4401 4061 4402 4062 if ((r_write_pktid.read() & 0x7) == TYPE_SC) … … 4406 4066 } 4407 4067 4068 // register the modified data in TRT 4069 if (hit_read) 4070 { 4071 r_write_trt_index = hit_index; 4072 r_write_fsm = WRITE_MISS_TRT_DATA; 4073 m_cpt_write_miss++; 4074 break; 4075 } 4076 4077 // set a new entry in TRT 4078 if (wok and not hit_write) 4079 { 4080 r_write_trt_index = wok_index; 4081 r_write_fsm = WRITE_MISS_TRT_SET; 4082 m_cpt_write_miss++; 4083 break; 4084 } 4085 4086 assert(false && "VCI_MEM_CACHE ERROR: this part must not be reached"); 4087 } 4088 break; 4089 } 4090 4091 //////////////// 4092 case WRITE_WAIT: // release the locks protecting the shared ressources 4093 { 4094 4408 4095 #if DEBUG_MEMC_WRITE 4409 if(m_debug) 4410 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ = " 4411 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 4412 #endif 4413 r_write_fsm = WRITE_BC_CC_SEND; 4096 if (m_debug) 4097 { 4098 std::cout << " <MEMC " << name() << " WRITE_WAIT> Releases the locks before retry" << std::endl; 4099 } 4100 #endif 4101 r_write_fsm = WRITE_DIR_REQ; 4102 break; 4103 } 4104 4105 //////////////////////// 4106 case WRITE_MISS_TRT_SET: // register a new transaction in TRT (Write Buffer) 4107 { 4108 if (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 4109 { 4110 std::vector<be_t> be_vector; 4111 std::vector<data_t> data_vector; 4112 be_vector.clear(); 4113 data_vector.clear(); 4114 for (size_t i = 0; i < m_words; i++) 4115 { 4116 be_vector.push_back(r_write_be[i]); 4117 data_vector.push_back(r_write_data[i]); 4118 } 4119 m_trt.set(r_write_trt_index.read(), 4120 true, // read request to XRAM 4121 m_nline[(addr_t)(r_write_address.read())], 4122 r_write_srcid.read(), 4123 r_write_trdid.read(), 4124 r_write_pktid.read(), 4125 false, // not a processor read 4126 0, // not a single word 4127 0, // word index 4128 be_vector, 4129 data_vector); 4130 r_write_fsm = WRITE_MISS_XRAM_REQ; 4131 4132 #if DEBUG_MEMC_WRITE 4133 if (m_debug) 4134 { 4135 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_SET> Set a new entry in TRT" << std::endl; 4136 } 4137 #endif 4138 } 4139 break; 4140 } 4141 4142 ///////////////////////// 4143 case WRITE_MISS_TRT_DATA: // update an entry in TRT (used as a Write Buffer) 4144 { 4145 if 
(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 4146 { 4147 std::vector<be_t> be_vector; 4148 std::vector<data_t> data_vector; 4149 be_vector.clear(); 4150 data_vector.clear(); 4151 for (size_t i = 0; i < m_words; i++) 4152 { 4153 be_vector.push_back(r_write_be[i]); 4154 data_vector.push_back(r_write_data[i]); 4155 } 4156 m_trt.write_data_mask(r_write_trt_index.read(), 4157 be_vector, 4158 data_vector); 4159 r_write_fsm = WRITE_RSP; 4160 4161 #if DEBUG_MEMC_WRITE 4162 if (m_debug) 4163 { 4164 std::cout << " <MEMC " << name() << " WRITE_MISS_TRT_DATA> Modify an existing entry in TRT" << std::endl; 4165 } 4166 #endif 4167 } 4168 break; 4169 } 4170 ///////////////////////// 4171 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 4172 { 4173 if (not r_write_to_ixr_cmd_req.read()) 4174 { 4175 r_write_to_ixr_cmd_req = true; 4176 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 4177 r_write_fsm = WRITE_RSP; 4178 4179 #if DEBUG_MEMC_WRITE 4180 if (m_debug) 4181 { 4182 std::cout << " <MEMC " << name() 4183 << " WRITE_MISS_XRAM_REQ> Post a GET request to the" 4184 << " IXR_CMD FSM" << std::endl; 4185 } 4186 #endif 4187 } 4188 break; 4189 } 4190 /////////////////////// 4191 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 4192 // the cache line must be erased in mem-cache, and written 4193 // into XRAM. 4194 { 4195 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 4196 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 4197 4198 m_cpt_write_broadcast++; 4199 4200 // write enable signal for data buffer. 4201 r_write_bc_data_we = true; 4202 4203 r_write_fsm = WRITE_BC_TRT_LOCK; 4204 4205 #if DEBUG_MEMC_WRITE 4206 if (m_debug) 4207 { 4208 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 4209 << " Read the cache to complete local buffer" << std::endl; 4210 } 4211 #endif 4212 break; 4213 } 4214 /////////////////////// 4215 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 4216 { 4217 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 4218 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 4219 4220 // We read the cache and complete the buffer. As the DATA cache uses a 4221 // synchronous RAM, the read DATA request has been performed in the 4222 // WRITE_BC_DIR_READ state but the data is available in this state. 
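// Illustrative helper (not part of this changeset) showing the per-word merge
// performed by the loop just below: the 4-bit byte enable is expanded into a
// 32-bit mask, bytes selected by the mask come from the local write buffer,
// the remaining bytes come from the data read out of the cache in the
// previous state. merge_word is a name of this sketch only.
#include <cstdint>

inline uint32_t merge_word(uint32_t buffer_word, uint32_t cache_word, uint32_t be)
{
    uint32_t mask = 0;
    if (be & 0x1) mask |= 0x000000FF;
    if (be & 0x2) mask |= 0x0000FF00;
    if (be & 0x4) mask |= 0x00FF0000;
    if (be & 0x8) mask |= 0xFF000000;
    return (buffer_word & mask) | (cache_word & ~mask);
}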
4223 if (r_write_bc_data_we.read()) 4224 { 4225 size_t set = m_y[(addr_t) (r_write_address.read())]; 4226 size_t way = r_write_way.read(); 4227 for (size_t word = 0; word < m_words; word++) 4228 { 4229 data_t mask = 0; 4230 if (r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 4231 if (r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 4232 if (r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 4233 if (r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 4234 4235 // complete only if mask is not null (for energy consumption) 4236 r_write_data[word] = 4237 (r_write_data[word].read() & mask) | 4238 (m_cache_data.read(way, set, word) & ~mask); 4239 } 4240 #if DEBUG_MEMC_WRITE 4241 if (m_debug) 4242 { 4243 std::cout << " <MEMC " << name() 4244 << " WRITE_BC_TRT_LOCK> Complete data buffer" << std::endl; 4245 } 4246 #endif 4247 } 4248 4249 if (r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) 4250 { 4251 // if we loop in this state, the data does not need to be 4252 // rewritten (for energy consuption) 4253 r_write_bc_data_we = false; 4414 4254 break; 4415 4255 } 4416 4256 4417 ////////////////////// 4257 size_t wok_index = 0; 4258 bool wok = not m_trt.full(wok_index); 4259 if (wok) // set a new entry in TRT 4260 { 4261 r_write_trt_index = wok_index; 4262 r_write_fsm = WRITE_BC_IVT_LOCK; 4263 } 4264 else // wait an empty entry in TRT 4265 { 4266 r_write_fsm = WRITE_WAIT; 4267 } 4268 4269 #if DEBUG_MEMC_WRITE 4270 if (m_debug) 4271 { 4272 std::cout << " <MEMC " << name() 4273 << " WRITE_BC_TRT_LOCK> Check TRT : wok = " << wok 4274 << " / index = " << wok_index << std::endl; 4275 } 4276 #endif 4277 break; 4278 } 4279 ////////////////////// 4280 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 4281 { 4282 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 4283 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 4284 4285 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 4286 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 4287 4288 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 4289 { 4290 bool wok = false; 4291 size_t index = 0; 4292 size_t srcid = r_write_srcid.read(); 4293 size_t trdid = r_write_trdid.read(); 4294 size_t pktid = r_write_pktid.read(); 4295 addr_t nline = m_nline[(addr_t) (r_write_address.read())]; 4296 size_t nb_copies = r_write_count.read(); 4297 4298 wok = m_ivt.set(false, // it's an inval transaction 4299 true, // it's a broadcast 4300 true, // response required 4301 false, // no acknowledge required 4302 srcid, 4303 trdid, 4304 pktid, 4305 nline, 4306 nb_copies, 4307 index); 4308 #if DEBUG_MEMC_WRITE 4309 if (m_debug and wok) 4310 { 4311 std::cout << " <MEMC " << name() << " WRITE_BC_IVT_LOCK> Register broadcast inval in IVT" 4312 << " / nb_copies = " << r_write_count.read() << std::endl; 4313 } 4314 #endif 4315 r_write_upt_index = index; 4316 4317 if (wok) r_write_fsm = WRITE_BC_DIR_INVAL; 4318 else r_write_fsm = WRITE_WAIT; 4319 } 4320 break; 4321 } 4322 //////////////////////// 4323 case WRITE_BC_DIR_INVAL: 4324 { 4325 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 4326 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 4327 4328 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 4329 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 4330 4331 assert((r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 4332 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 4333 4334 // register PUT request in TRT 4335 std::vector<data_t> data_vector; 4336 data_vector.clear(); 4337 for 
(size_t i = 0; i < m_words; i++) 4338 { 4339 data_vector.push_back(r_write_data[i].read()); 4340 } 4341 m_trt.set(r_write_trt_index.read(), 4342 false, // PUT request 4343 m_nline[(addr_t) (r_write_address.read())], 4344 0, // unused 4345 0, // unused 4346 0, // unused 4347 false, // not a processor read 4348 0, // unused 4349 0, // unused 4350 std::vector<be_t> (m_words, 0), 4351 data_vector); 4352 4353 // invalidate directory entry 4354 DirectoryEntry entry; 4355 entry.valid = false; 4356 entry.dirty = false; 4357 entry.tag = 0; 4358 entry.is_cnt = false; 4359 entry.lock = false; 4360 entry.owner.srcid = 0; 4361 entry.owner.inst = false; 4362 entry.ptr = 0; 4363 entry.count = 0; 4364 entry.cache_coherent = false; 4365 4366 size_t set = m_y[(addr_t) (r_write_address.read())]; 4367 size_t way = r_write_way.read(); 4368 4369 m_cache_directory.write(set, way, entry); 4370 4371 if ((r_write_pktid.read() & 0x7) == TYPE_SC) 4372 { 4373 m_llsc_table.sc(r_write_address.read(), 4374 r_write_sc_key.read()); 4375 } 4376 4377 #if DEBUG_MEMC_WRITE 4378 if (m_debug) 4379 { 4380 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ = " 4381 << r_write_address.read() << " / register the put transaction in TRT:" << std::endl; 4382 } 4383 #endif 4384 r_write_fsm = WRITE_BC_CC_SEND; 4385 break; 4386 } 4387 4388 ////////////////////// 4418 4389 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 4419 4420 if(not r_write_to_cc_send_multi_req.read() and4421 4422 not r_write_to_cc_send_req.read())4423 4424 4425 4426 4427 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())];4428 4429 4430 4431 for(size_t i=0; i<m_words; i++)4432 4433 r_write_to_cc_send_be[i]=0;4434 4435 4436 4390 { 4391 if(not r_write_to_cc_send_multi_req.read() and 4392 not r_write_to_cc_send_brdcast_req.read() and 4393 not r_write_to_cc_send_req.read()) 4394 { 4395 r_write_to_cc_send_multi_req = false; 4396 r_write_to_cc_send_brdcast_req = true; 4397 r_write_to_cc_send_trdid = r_write_upt_index.read(); 4398 r_write_to_cc_send_nline = m_nline[(addr_t) (r_write_address.read())]; 4399 r_write_to_cc_send_index = 0; 4400 r_write_to_cc_send_count = 0; 4401 4402 for (size_t i = 0; i < m_words; i++) 4403 { 4404 r_write_to_cc_send_be[i] = 0; 4405 r_write_to_cc_send_data[i] = 0; 4406 } 4407 r_write_fsm = WRITE_BC_XRAM_REQ; 4437 4408 4438 4409 #if DEBUG_MEMC_WRITE 4439 if(m_debug) 4440 std::cout << " <MEMC " << name() 4441 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 4442 #endif 4443 } 4444 break; 4445 } 4446 4447 /////////////////////// 4448 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 4449 { 4450 if( not r_write_to_ixr_cmd_req.read() ) 4451 { 4452 r_write_to_ixr_cmd_req = true; 4453 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 4454 r_write_fsm = WRITE_IDLE; 4410 if (m_debug) 4411 { 4412 std::cout << " <MEMC " << name() 4413 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 4414 } 4415 #endif 4416 } 4417 break; 4418 } 4419 4420 /////////////////////// 4421 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 4422 { 4423 if (not r_write_to_ixr_cmd_req.read()) 4424 { 4425 r_write_to_ixr_cmd_req = true; 4426 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 4427 r_write_fsm = WRITE_IDLE; 4455 4428 4456 4429 #if DEBUG_MEMC_WRITE 4457 if(m_debug) 4458 std::cout << " <MEMC " << name() 4459 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 4460 #endif 4461 } 4462 
break; 4463 } 4430 if (m_debug) 4431 { 4432 std::cout << " <MEMC " << name() 4433 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 4434 } 4435 #endif 4436 } 4437 break; 4438 } 4464 4439 } // end switch r_write_fsm 4465 4440 … … 4485 4460 //////////////////////////////////////////////////////////////////////// 4486 4461 4487 switch (r_ixr_cmd_fsm.read())4462 switch (r_ixr_cmd_fsm.read()) 4488 4463 { 4464 /////////////////////// 4465 case IXR_CMD_READ_IDLE: 4466 { 4467 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4468 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4469 else if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4470 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4471 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4472 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4473 break; 4474 } 4489 4475 //////////////////////// 4490 case IXR_CMD_READ_IDLE:4491 if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT;4492 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT;4493 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT;4494 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT;4495 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT;4496 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT;4497 break;4498 ////////////////////////4499 4476 case IXR_CMD_WRITE_IDLE: 4500 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4501 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4502 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4503 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4504 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4505 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4506 break; 4507 //////////////////////// 4477 { 4478 if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4479 else if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4480 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4481 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4482 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4483 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4484 break; 4485 } 4486 ////////////////////// 4508 4487 case IXR_CMD_CAS_IDLE: 4509 if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4510 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4511 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4512 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4513 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4514 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4515 break; 4516 //////////////////////// 4488 { 4489 if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4490 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4491 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4492 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4493 else if 
(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4494 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4495 break; 4496 } 4497 /////////////////////// 4517 4498 case IXR_CMD_XRAM_IDLE: 4518 if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4519 else if(r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4520 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4521 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4522 else if(r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4523 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4524 break; 4525 //////////////////////// 4499 { 4500 if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4501 else if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4502 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4503 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4504 else if (r_cleanup_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4505 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4506 break; 4507 } 4508 //////////////////////// 4526 4509 case IXR_CMD_CLEANUP_IDLE: 4527 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4528 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4529 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4530 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4531 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4532 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4533 break; 4534 ///////////////////////// 4510 { 4511 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4512 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4513 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4514 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4515 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4516 else if (r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4517 break; 4518 } 4519 ///////////////////////// 4535 4520 case IXR_CMD_CONFIG_IDLE: 4536 { 4537 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4538 else if(r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4539 else if(r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4540 else if(r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4541 else if(r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4542 else if(r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4543 break; 4544 } 4545 4546 ////////////////////// 4521 { 4522 if (r_read_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 4523 else if (r_write_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 4524 else if (r_cas_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 4525 else if (r_cleanup_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CLEANUP_TRT; 4526 else if (r_xram_rsp_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 4527 else if (r_config_to_ixr_cmd_req.read()) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 4528 break; 4529 } 4530 ////////////////////// 4547 4531 case IXR_CMD_READ_TRT: // access TRT for a GET 4548 4549 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD)4550 4551 
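// NOTE (illustration only, not part of the changeset): the chain of
// IXR_CMD_*_IDLE states implements a rotating priority. After serving client k,
// the corresponding idle state polls the request flip-flops starting with
// client k+1, so the client just served gets the lowest priority on the next
// arbitration. Minimal standalone sketch of the same policy (client indices
// and select_next_client are illustrative names):

#include <cstddef>

inline int select_next_client(const bool req[], size_t nclients, size_t last_served)
{
    for (size_t i = 1; i <= nclients; i++)          // scan every client once
    {
        size_t c = (last_served + i) % nclients;    // start just after the last winner
        if (req[c]) return (int) c;                 // first pending request is served
    }
    return -1;                                      // no pending request
}
// end of illustrative aside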
TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read());4552 r_ixr_cmd_address = entry.nline * (m_words<<2);4553 4554 4555 4556 4532 { 4533 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD) 4534 { 4535 TransactionTabEntry entry = m_trt.read(r_read_to_ixr_cmd_index.read()); 4536 r_ixr_cmd_address = entry.nline * (m_words << 2); 4537 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 4538 r_ixr_cmd_get = true; 4539 r_ixr_cmd_word = 0; 4540 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 4557 4541 4558 4542 #if DEBUG_MEMC_IXR_CMD 4559 if(m_debug) 4560 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 4561 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 4562 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4563 #endif 4564 } 4565 break; 4566 } 4567 /////////////////////// 4568 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 4569 { 4570 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4571 { 4572 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 4573 r_ixr_cmd_address = entry.nline * (m_words<<2); 4574 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 4575 r_ixr_cmd_get = entry.xram_read; 4576 r_ixr_cmd_word = 0; 4577 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 4578 4579 // Read data from TRT if PUT transaction 4580 if (not entry.xram_read) 4543 if (m_debug) 4544 { 4545 std::cout << " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 4546 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 4547 << " / address = " << std::hex << (entry.nline * (m_words << 2)) << std::endl; 4548 } 4549 #endif 4550 } 4551 break; 4552 } 4553 /////////////////////// 4554 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 4555 { 4556 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD) 4557 { 4558 TransactionTabEntry entry = m_trt.read(r_write_to_ixr_cmd_index.read()); 4559 r_ixr_cmd_address = entry.nline * (m_words << 2); 4560 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 4561 r_ixr_cmd_get = entry.xram_read; 4562 r_ixr_cmd_word = 0; 4563 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 4564 4565 // Read data from TRT if PUT transaction 4566 if (not entry.xram_read) 4567 { 4568 for (size_t i = 0; i < m_words; i++) 4581 4569 { 4582 for( size_t i=0 ; i<m_words ; i++ )r_ixr_cmd_wdata[i] = entry.wdata[i];4570 r_ixr_cmd_wdata[i] = entry.wdata[i]; 4583 4571 } 4572 } 4584 4573 4585 4574 #if DEBUG_MEMC_IXR_CMD 4586 if(m_debug) 4587 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 4588 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 4589 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4590 #endif 4591 } 4592 break; 4593 } 4594 ///////////////////// 4575 if (m_debug) 4576 { 4577 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 4578 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 4579 << " / address = " << std::hex << (entry.nline * (m_words << 2)) << std::endl; 4580 } 4581 #endif 4582 } 4583 break; 4584 } 4585 ///////////////////// 4595 4586 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 4596 { 4597 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4598 { 4599 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 4600 r_ixr_cmd_address = entry.nline * (m_words<<2); 4601 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 4602 r_ixr_cmd_get = entry.xram_read; 4603 r_ixr_cmd_word = 0; 4604 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 4605 4606 // Read data from TRT if PUT transaction 4607 if (not entry.xram_read) 4587 { 4588 if 
(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD) 4589 { 4590 TransactionTabEntry entry = m_trt.read(r_cas_to_ixr_cmd_index.read()); 4591 r_ixr_cmd_address = entry.nline * (m_words << 2); 4592 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 4593 r_ixr_cmd_get = entry.xram_read; 4594 r_ixr_cmd_word = 0; 4595 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 4596 4597 // Read data from TRT if PUT transaction 4598 if (not entry.xram_read) 4599 { 4600 for (size_t i = 0; i < m_words; i++) 4608 4601 { 4609 for( size_t i=0 ; i<m_words ; i++ )r_ixr_cmd_wdata[i] = entry.wdata[i];4602 r_ixr_cmd_wdata[i] = entry.wdata[i]; 4610 4603 } 4604 } 4611 4605 4612 4606 #if DEBUG_MEMC_IXR_CMD 4613 if(m_debug) 4614 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 4615 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 4616 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4617 #endif 4618 } 4619 break; 4620 } 4621 ////////////////////// 4607 if (m_debug) 4608 { 4609 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 4610 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 4611 << " / address = " << std::hex << (entry.nline * (m_words << 2)) << std::endl; 4612 } 4613 #endif 4614 } 4615 break; 4616 } 4617 ////////////////////// 4622 4618 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 4623 { 4624 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4625 { 4626 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 4627 r_ixr_cmd_address = entry.nline * (m_words<<2); 4628 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 4629 r_ixr_cmd_get = false; 4630 r_ixr_cmd_word = 0; 4631 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 4632 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4619 { 4620 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD) 4621 { 4622 TransactionTabEntry entry = m_trt.read(r_xram_rsp_to_ixr_cmd_index.read()); 4623 r_ixr_cmd_address = entry.nline * (m_words << 2); 4624 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 4625 r_ixr_cmd_get = false; 4626 r_ixr_cmd_word = 0; 4627 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 4628 for (size_t i = 0; i < m_words; i++) 4629 { 4630 r_ixr_cmd_wdata[i] = entry.wdata[i]; 4631 } 4633 4632 4634 4633 #if DEBUG_MEMC_IXR_CMD 4635 if(m_debug) 4636 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 4637 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 4638 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4639 #endif 4640 } 4641 break; 4642 } 4643 ////////////////////// 4634 if (m_debug) 4635 { 4636 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 4637 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 4638 << " / address = " << std::hex << (entry.nline * (m_words << 2)) << std::endl; 4639 } 4640 #endif 4641 } 4642 break; 4643 } 4644 ////////////////////// 4644 4645 case IXR_CMD_CLEANUP_TRT: // access TRT for a PUT 4645 { 4646 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4647 { 4648 4649 TransactionTabEntry entry = m_trt.read( r_cleanup_to_ixr_cmd_index.read() ); 4650 r_ixr_cmd_address = entry.nline * (m_words<<2); 4651 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 4652 r_ixr_cmd_get = false; 4653 r_ixr_cmd_word = 0; 4654 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 4655 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4646 { 4647 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD) 4648 { 4649 TransactionTabEntry entry = 
m_trt.read(r_cleanup_to_ixr_cmd_index.read()); 4650 r_ixr_cmd_address = entry.nline * (m_words << 2); 4651 r_ixr_cmd_trdid = r_cleanup_to_ixr_cmd_index.read(); 4652 r_ixr_cmd_get = false; 4653 r_ixr_cmd_word = 0; 4654 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_DATA_SEND; 4655 for (size_t i = 0; i < m_words; i++) 4656 { 4657 r_ixr_cmd_wdata[i] = entry.wdata[i]; 4658 } 4656 4659 4657 4660 #if DEBUG_MEMC_IXR_CMD 4658 if(m_debug) 4659 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 4660 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 4661 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4662 #endif 4663 } 4664 break; 4665 } 4666 //////////////////////// 4661 if (m_debug) 4662 { 4663 std::cout << " <MEMC " << name() << " IXR_CMD_CLEANUP_TRT> TRT access" 4664 << " index = " << std::dec << r_cleanup_to_ixr_cmd_index.read() 4665 << " / address = " << std::hex << (entry.nline * (m_words << 2)) << std::endl; 4666 } 4667 #endif 4668 } 4669 break; 4670 } 4671 //////////////////////// 4667 4672 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 4668 { 4669 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 4670 { 4671 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 4672 r_ixr_cmd_address = entry.nline * (m_words<<2); 4673 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 4674 r_ixr_cmd_get = false; 4675 r_ixr_cmd_word = 0; 4676 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 4677 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 4673 { 4674 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD) 4675 { 4676 TransactionTabEntry entry = m_trt.read(r_config_to_ixr_cmd_index.read()); 4677 r_ixr_cmd_address = entry.nline * (m_words << 2); 4678 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 4679 r_ixr_cmd_get = false; 4680 r_ixr_cmd_word = 0; 4681 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 4682 for (size_t i = 0; i < m_words; i++) 4683 { 4684 r_ixr_cmd_wdata[i] = entry.wdata[i]; 4685 } 4678 4686 4679 4687 #if DEBUG_MEMC_IXR_CMD 4680 if(m_debug) 4681 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 4682 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 4683 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 4684 #endif 4685 } 4686 break; 4687 } 4688 4689 /////////////////////// 4688 if (m_debug) 4689 { 4690 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 4691 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 4692 << " / address = " << std::hex << (entry.nline * (m_words << 2)) << std::endl; 4693 } 4694 #endif 4695 } 4696 break; 4697 } 4698 4699 /////////////////////// 4690 4700 case IXR_CMD_READ_SEND: // send a get from READ FSM 4691 4692 if(p_vci_ixr.cmdack)4693 4694 4695 4701 { 4702 if (p_vci_ixr.cmdack) 4703 { 4704 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 4705 r_read_to_ixr_cmd_req = false; 4696 4706 4697 4707 #if DEBUG_MEMC_IXR_CMD 4698 if(m_debug) 4699 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 4700 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4701 #endif 4702 } 4703 break; 4704 } 4705 //////////////////////// 4708 if (m_debug) 4709 { 4710 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 4711 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4712 } 4713 #endif 4714 } 4715 break; 4716 } 4717 //////////////////////// 4706 4718 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE 
FSM 4707 { 4708 if(p_vci_ixr.cmdack) 4709 { 4710 if (not r_ixr_cmd_get.read()) // PUT 4711 { 4712 if(r_ixr_cmd_word.read() == (m_words - 2)) 4713 { 4714 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4715 r_write_to_ixr_cmd_req = false; 4716 } 4717 else 4718 { 4719 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4720 } 4721 4722 #if DEBUG_MEMC_IXR_CMD 4723 if(m_debug) 4724 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 4725 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4726 #endif 4727 } 4728 else // GET 4719 { 4720 if (p_vci_ixr.cmdack) 4721 { 4722 if (not r_ixr_cmd_get.read()) // PUT 4723 { 4724 if (r_ixr_cmd_word.read() == (m_words - 2)) 4729 4725 { 4730 4726 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4731 4727 r_write_to_ixr_cmd_req = false; 4732 4733 #if DEBUG_MEMC_IXR_CMD4734 if(m_debug)4735 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex4736 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl;4737 #endif4738 }4739 }4740 break;4741 }4742 //////////////////////4743 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM4744 {4745 if(p_vci_ixr.cmdack)4746 {4747 if (not r_ixr_cmd_get.read()) // PUT4748 {4749 if(r_ixr_cmd_word.read() == (m_words - 2))4750 {4751 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE;4752 r_cas_to_ixr_cmd_req = false;4753 }4754 else4755 {4756 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2;4757 }4758 4759 #if DEBUG_MEMC_IXR_CMD4760 if(m_debug)4761 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex4762 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl;4763 #endif4764 }4765 else // GET4766 {4767 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE;4768 r_cas_to_ixr_cmd_req = false;4769 4770 #if DEBUG_MEMC_IXR_CMD4771 if(m_debug)4772 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex4773 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl;4774 #endif4775 }4776 }4777 break;4778 }4779 ///////////////////////4780 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM4781 {4782 if(p_vci_ixr.cmdack.read())4783 {4784 if(r_ixr_cmd_word.read() == (m_words - 2))4785 {4786 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE;4787 r_xram_rsp_to_ixr_cmd_req = false;4788 4728 } 4789 4729 else … … 4791 4731 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4792 4732 } 4733 4793 4734 #if DEBUG_MEMC_IXR_CMD 4794 if(m_debug) 4795 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 4796 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4797 #endif 4798 } 4799 break; 4800 } 4801 4802 //////////////////////// 4803 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4804 { 4805 if(p_vci_ixr.cmdack.read()) 4806 { 4807 if(r_ixr_cmd_word.read() == (m_words - 2)) 4735 if (m_debug) 4808 4736 { 4809 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 4810 r_cleanup_to_ixr_cmd_req = false; 4811 //r_ixr_cmd_word = 0; 4812 //r_xram_rsp_to_ixr_cmd_inval_ncc_pending = false; 4737 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 4738 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4739 } 4740 #endif 4741 } 4742 else // GET 4743 { 4744 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 4745 r_write_to_ixr_cmd_req = false; 4746 4747 #if DEBUG_MEMC_IXR_CMD 4748 if (m_debug) 4749 { 4750 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 4751 
<< " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4752 } 4753 #endif 4754 } 4755 } 4756 break; 4757 } 4758 ////////////////////// 4759 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 4760 { 4761 if (p_vci_ixr.cmdack) 4762 { 4763 if (not r_ixr_cmd_get.read()) // PUT 4764 { 4765 if (r_ixr_cmd_word.read() == (m_words - 2)) 4766 { 4767 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 4768 r_cas_to_ixr_cmd_req = false; 4813 4769 } 4814 4770 else … … 4818 4774 4819 4775 #if DEBUG_MEMC_IXR_CMD 4820 if (m_debug)4776 if (m_debug) 4821 4777 { 4822 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::endl; 4778 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 4779 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4823 4780 } 4824 4781 #endif 4825 4782 } 4826 break; 4827 } 4828 4829 ///////////////////////// 4783 else // GET 4784 { 4785 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 4786 r_cas_to_ixr_cmd_req = false; 4787 4788 #if DEBUG_MEMC_IXR_CMD 4789 if (m_debug) 4790 { 4791 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 4792 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4793 } 4794 #endif 4795 } 4796 } 4797 break; 4798 } 4799 /////////////////////// 4800 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 4801 { 4802 if (p_vci_ixr.cmdack.read()) 4803 { 4804 if (r_ixr_cmd_word.read() == (m_words - 2)) 4805 { 4806 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 4807 r_xram_rsp_to_ixr_cmd_req = false; 4808 } 4809 else 4810 { 4811 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4812 } 4813 #if DEBUG_MEMC_IXR_CMD 4814 if (m_debug) 4815 { 4816 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 4817 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4818 } 4819 #endif 4820 } 4821 break; 4822 } 4823 4824 //////////////////////// 4825 case IXR_CMD_CLEANUP_DATA_SEND: // send a put command to XRAM 4826 { 4827 if (p_vci_ixr.cmdack.read()) 4828 { 4829 if (r_ixr_cmd_word.read() == (m_words - 2)) 4830 { 4831 r_ixr_cmd_fsm = IXR_CMD_CLEANUP_IDLE; 4832 r_cleanup_to_ixr_cmd_req = false; 4833 } 4834 else 4835 { 4836 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4837 } 4838 4839 #if DEBUG_MEMC_IXR_CMD 4840 if (m_debug) 4841 { 4842 std::cout << " <MEMC " << name() << ".IXR_CMD_CLEANUP_DATA_SEND> Send a put request to xram" << std::hex 4843 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4844 } 4845 #endif 4846 } 4847 break; 4848 } 4849 ///////////////////////// 4830 4850 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 4831 4832 if(p_vci_ixr.cmdack.read())4833 4834 if(r_ixr_cmd_word.read() == (m_words - 2))4835 4836 4837 4838 4839 4840 4841 4842 4851 { 4852 if (p_vci_ixr.cmdack.read()) 4853 { 4854 if (r_ixr_cmd_word.read() == (m_words - 2)) 4855 { 4856 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 4857 r_config_to_ixr_cmd_req = false; 4858 } 4859 else 4860 { 4861 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 4862 } 4843 4863 4844 4864 #if DEBUG_MEMC_IXR_CMD 4845 if(m_debug) 4846 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 4847 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 4848 #endif 4849 } 4850 break; 4851 } 4865 if (m_debug) 4866 { 4867 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << 
std::hex 4868 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2) << std::endl; 4869 } 4870 #endif 4871 } 4872 break; 4873 } 4852 4874 } // end switch r_ixr_cmd_fsm 4853 4875 … … 4877 4899 ////////////////// 4878 4900 case IXR_RSP_IDLE: // test transaction type: PUT/GET 4879 4880 if(p_vci_ixr.rspval.read())4881 4882 4883 4884 4885 4901 { 4902 if (p_vci_ixr.rspval.read()) 4903 { 4904 r_ixr_rsp_cpt = 0; 4905 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 4906 4907 if (p_vci_ixr.reop.read() and not 4886 4908 p_vci_ixr.rerror.read()) // PUT 4909 { 4910 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4911 4912 #if DEBUG_MEMC_IXR_RSP 4913 if (m_debug) 4887 4914 { 4888 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 4915 std::cout << " <MEMC " << name() 4916 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 4889 4917 } 4890 4891 else // GET transaction 4918 #endif 4919 } 4920 else // GET 4921 { 4922 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 4923 4924 #if DEBUG_MEMC_IXR_RSP 4925 if (m_debug) 4892 4926 { 4893 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 4894 4895 #if DEBUG_MEMC_IXR_RSP 4896 if(m_debug) 4897 std::cout << " <MEMC " << name() 4898 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 4899 #endif 4927 std::cout << " <MEMC " << name() 4928 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 4900 4929 } 4901 } 4902 break; 4903 } 4904 //////////////////////// 4905 case IXR_RSP_ACK: // Acknowledge PUT transaction 4906 { 4907 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4908 break; 4909 } 4910 4911 /////////////////////// 4930 #endif 4931 } 4932 } 4933 break; 4934 } 4935 ////////////////////////// 4912 4936 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 4913 4937 // decrease the line counter if config request 4914 { 4915 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4916 { 4917 size_t index = r_ixr_rsp_trt_index.read(); 4918 if (m_trt.is_config(index)) // it's a config transaction 4919 { 4920 config_rsp_lines_ixr_rsp_decr = true; 4921 } 4922 m_trt.erase(index); 4938 { 4939 if (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 4940 { 4941 size_t index = r_ixr_rsp_trt_index.read(); 4942 4943 if (m_trt.is_config(index)) // it's a config transaction 4944 { 4945 config_rsp_lines_ixr_rsp_decr = true; 4946 } 4947 4948 m_trt.erase(index); 4949 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4950 4951 #if DEBUG_MEMC_IXR_RSP 4952 if (m_debug) 4953 { 4954 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 4955 << r_ixr_rsp_trt_index.read() << std::endl; 4956 } 4957 #endif 4958 } 4959 break; 4960 } 4961 ////////////////////// 4962 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 4963 { 4964 if ((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4965 { 4966 size_t index = r_ixr_rsp_trt_index.read(); 4967 size_t word = r_ixr_rsp_cpt.read(); 4968 bool eop = p_vci_ixr.reop.read(); 4969 wide_data_t data = p_vci_ixr.rdata.read(); 4970 bool rerror = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4971 4972 assert(((eop == (word == (m_words - 2))) or rerror) and 4973 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 4974 4975 m_trt.write_rsp(index, word, data, rerror); 4976 4977 r_ixr_rsp_cpt = word + 2; 4978 4979 if (eop) 4980 { 4981 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()] = true; 4923 4982 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4983 } 4924 4984 4925 4985 #if DEBUG_MEMC_IXR_RSP 4926 if(m_debug) 4927 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_ERASE> Erase TRT entry " 4928 << r_ixr_rsp_trt_index.read() << std::endl; 4929 
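// NOTE (illustration only, not part of the changeset): both the IXR_CMD_*_SEND
// states and IXR_RSP_TRT_READ handle the cache line as 64-bit flits, i.e. two
// 32-bit words per flit, which is why the word counter advances by 2 and the
// last flit is the one starting at word (m_words - 2). Minimal standalone check
// of that arithmetic, assuming m_words is even as in the TSAR configurations:

#include <cassert>
#include <cstddef>

inline void check_burst(size_t m_words)
{
    size_t flits = 0;
    for (size_t word = 0; word < m_words; word += 2)  // one 64-bit flit per step
    {
        bool eop = (word == m_words - 2);             // same test as the FSM / the assert
        if (eop) assert(word + 2 == m_words);
        flits++;
    }
    assert(flits == m_words / 2);                     // e.g. 8 flits for a 16-word line
}
// end of illustrative aside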
#endif 4930 } 4931 break; 4932 } 4933 ////////////////////// 4934 case IXR_RSP_TRT_READ: // write a 64 bits data in the TRT 4935 { 4936 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval) 4937 { 4938 size_t index = r_ixr_rsp_trt_index.read(); 4939 size_t word = r_ixr_rsp_cpt.read(); 4940 bool eop = p_vci_ixr.reop.read(); 4941 wide_data_t data = p_vci_ixr.rdata.read(); 4942 bool rerror = ((p_vci_ixr.rerror.read() & 0x1) == 1); 4943 4944 assert(((eop == (word == (m_words-2))) or rerror) and 4945 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 4946 4947 m_trt.write_rsp( index, word, data, rerror ); 4948 4949 r_ixr_rsp_cpt = word + 2; 4950 4951 if(eop) 4952 { 4953 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; 4954 r_ixr_rsp_fsm = IXR_RSP_IDLE; 4955 } 4956 4957 #if DEBUG_MEMC_IXR_RSP 4958 if(m_debug) 4959 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing a word in TRT : " 4960 << " index = " << std::dec << index 4961 << " / word = " << word 4962 << " / data = " << std::hex << data << std::endl; 4963 #endif 4964 } 4965 break; 4966 } 4986 if (m_debug) 4987 { 4988 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 4989 << " index = " << std::dec << index 4990 << " / word = " << word 4991 << " / data = " << std::hex << data << std::endl; 4992 } 4993 #endif 4994 } 4995 break; 4996 } 4967 4997 } // end swich r_ixr_rsp_fsm 4968 4998 … … 4994 5024 /////////////////// 4995 5025 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 4996 { 4997 size_t old = r_xram_rsp_trt_index.read(); 4998 size_t lines = m_trt_lines; 4999 for(size_t i=0 ; i<lines ; i++) 5000 { 5001 size_t index = (i+old+1) %lines; 5002 if(r_ixr_rsp_to_xram_rsp_rok[index]) 5026 { 5027 size_t old = r_xram_rsp_trt_index.read(); 5028 size_t lines = m_trt_lines; 5029 for (size_t i = 0; i < lines; i++) 5030 { 5031 size_t index = (i + old + 1) % lines; 5032 if (r_ixr_rsp_to_xram_rsp_rok[index]) 5033 { 5034 r_xram_rsp_trt_index = index; 5035 r_ixr_rsp_to_xram_rsp_rok[index] = false; 5036 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 5037 5038 #if DEBUG_MEMC_XRAM_RSP 5039 if (m_debug) 5003 5040 { 5004 r_xram_rsp_trt_index = index; 5005 r_ixr_rsp_to_xram_rsp_rok[index] = false; 5006 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 5041 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 5042 << " Available cache line in TRT:" 5043 << " index = " << std::dec << index << std::endl; 5044 } 5045 #endif 5046 break; 5047 } 5048 } 5049 break; 5050 } 5051 /////////////////////// 5052 case XRAM_RSP_DIR_LOCK: // Takes the DIR lock and the TRT lock 5053 // Copy the TRT entry in a local buffer 5054 { 5055 if ((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5056 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 5057 { 5058 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 5059 size_t index = r_xram_rsp_trt_index.read(); 5060 r_xram_rsp_trt_buf.copy(m_trt.read(index)); 5061 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 5007 5062 5008 5063 #if DEBUG_MEMC_XRAM_RSP 5009 if(m_debug) 5010 std::cout << " <MEMC " << name() << " XRAM_RSP_IDLE>" 5011 << " Available cache line in TRT:" 5012 << " index = " << std::dec << index << std::endl; 5013 #endif 5014 break; 5064 if (m_debug) 5065 { 5066 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 5067 << " Get access to DIR and TRT" << std::endl; 5068 } 5069 #endif 5070 } 5071 break; 5072 } 5073 /////////////////////// 5074 case XRAM_RSP_TRT_COPY: // Select a victim cache line 5075 // and copy it in a local 
buffer 5076 { 5077 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5078 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 5079 5080 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 5081 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 5082 5083 // selects & extracts a victim line from cache 5084 size_t way = 0; 5085 size_t set = m_y[(addr_t) (r_xram_rsp_trt_buf.nline * m_words * 4)]; 5086 5087 DirectoryEntry victim(m_cache_directory.select(set, way)); 5088 5089 bool inval = (victim.count and victim.valid) or 5090 (!victim.cache_coherent and (victim.count == 1)); 5091 5092 // copy the victim line in a local buffer (both data dir) 5093 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 5094 5095 r_xram_rsp_victim_copy = victim.owner.srcid; 5096 r_xram_rsp_victim_coherent = victim.cache_coherent; 5097 r_xram_rsp_victim_copy_inst = victim.owner.inst; 5098 r_xram_rsp_victim_count = victim.count; 5099 r_xram_rsp_victim_ptr = victim.ptr; 5100 r_xram_rsp_victim_way = way; 5101 r_xram_rsp_victim_set = set; 5102 r_xram_rsp_victim_nline = (addr_t) victim.tag * m_sets + set; 5103 r_xram_rsp_victim_is_cnt = victim.is_cnt; 5104 r_xram_rsp_victim_inval = inval; 5105 // a NCC line is by default considered as dirty in the L1: we must take a reservation on a TRT entry 5106 r_xram_rsp_victim_dirty = victim.dirty or (!victim.cache_coherent and (victim.count == 1)); 5107 5108 // A line that undergoes a change in its state (ncc to cc), should not be evicted from the memory cache. 5109 if (((addr_t) victim.tag * m_sets + set) == r_read_to_cleanup_nline.read() and r_read_to_cleanup_req.read()) 5110 { 5111 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 5112 5113 #if DEBUG_MEMC_XRAM_RSP 5114 if (m_debug) 5115 { 5116 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 5117 << " Victim line is in ncc to cc mecanism" 5118 << " / nline = " << std::hex << (victim.tag * m_sets + set) 5119 << std::endl; 5120 } 5121 #endif 5122 } 5123 else if (not r_xram_rsp_trt_buf.rerror) 5124 { 5125 r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 5126 } 5127 else 5128 { 5129 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 5130 } 5131 5132 #if DEBUG_MEMC_XRAM_RSP 5133 if (m_debug) 5134 { 5135 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 5136 << " Select a victim slot: " 5137 << " way = " << std::dec << way 5138 << " / set = " << set 5139 << "/ count = " << victim.count 5140 << " / inval_required = " << inval << std::endl; 5141 } 5142 #endif 5143 break; 5144 } 5145 /////////////////////// 5146 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 5147 // to check a possible pending inval 5148 { 5149 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5150 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 5151 5152 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 5153 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 5154 5155 if (r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 5156 { 5157 size_t index = 0; 5158 if (m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 5159 { 5160 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 5161 5162 #if DEBUG_MEMC_XRAM_RSP 5163 if (m_debug) 5164 { 5165 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 5166 << " Get acces to IVT, but line invalidation registered" 5167 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline * m_words * 4 5168 << " / index = " << std::dec << index << std::endl; 5015 5169 } 5016 } 5017 break; 5018 } 5019 /////////////////////// 5020 case XRAM_RSP_DIR_LOCK: // Takes the 
DIR lock and the TRT lock 5021 // Copy the TRT entry in a local buffer 5022 { 5023 if((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5024 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 5025 { 5026 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 5027 size_t index = r_xram_rsp_trt_index.read(); 5028 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 5029 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 5170 #endif 5171 5172 } 5173 else if (m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 5174 { 5175 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 5030 5176 5031 5177 #if DEBUG_MEMC_XRAM_RSP 5032 if(m_debug) 5033 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_LOCK>" 5034 << " Get access to DIR and TRT" << std::endl; 5035 #endif 5036 } 5037 break; 5038 } 5039 /////////////////////// 5040 case XRAM_RSP_TRT_COPY: // Select a victim cache line 5041 // and copy it in a local buffer 5042 { 5043 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5044 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 5045 5046 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 5047 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 5048 5049 // selects & extracts a victim line from cache 5050 size_t way = 0; 5051 size_t set = m_y[(addr_t)(r_xram_rsp_trt_buf.nline * m_words * 4)]; 5052 5053 DirectoryEntry victim(m_cache_directory.select(set, way)); 5054 5055 bool inval = (victim.count && victim.valid) or (!victim.cache_coherent and (victim.count == 1)) ; 5056 5057 5058 // copy the victim line in a local buffer 5059 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 5060 5061 r_xram_rsp_victim_copy = victim.owner.srcid; 5062 r_xram_rsp_victim_coherent = victim.cache_coherent; 5063 r_xram_rsp_victim_copy_inst = victim.owner.inst; 5064 r_xram_rsp_victim_count = victim.count; 5065 r_xram_rsp_victim_ptr = victim.ptr; 5066 r_xram_rsp_victim_way = way; 5067 r_xram_rsp_victim_set = set; 5068 r_xram_rsp_victim_nline = victim.tag*m_sets + set; 5069 r_xram_rsp_victim_is_cnt = victim.is_cnt; 5070 r_xram_rsp_victim_inval = inval ; 5071 // a NCC line is by default considered as dirty in the L1: we must take a reservation on a TRT entry 5072 r_xram_rsp_victim_dirty = victim.dirty or (!victim.cache_coherent && (victim.count == 1)); 5073 5074 5075 // A line that undergoes a change in its state (ncc to cc), should not be evicted from the memory cache. 
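// NOTE (illustration only, not part of the changeset): the two conditions
// computed in XRAM_RSP_TRT_COPY can be read as follows (helper names are
// illustrative, fields follow DirectoryEntry): 'inval' means the victim still
// has registered L1 copies, so an invalidation must be sent before the slot is
// reused; 'dirty' means a PUT to XRAM is needed, and an NCC line with a single
// owner is conservatively treated as dirty because the L1 may hold a more
// recent copy.

#include <cstddef>

inline bool victim_needs_inval(bool valid, bool coherent, size_t count)
{
    return (count != 0 and valid) or (not coherent and count == 1);
}

inline bool victim_needs_put(bool dirty, bool coherent, size_t count)
{
    return dirty or (not coherent and count == 1);
}
// end of illustrative aside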
5076 if((victim.tag * m_sets + set) == r_read_to_cleanup_nline.read() and r_read_to_cleanup_req.read()) 5077 { 5078 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 5178 if (m_debug) 5179 { 5180 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 5181 << " Get acces to IVT, but inval required and IVT full" << std::endl; 5182 } 5183 #endif 5184 } 5185 else 5186 { 5187 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 5079 5188 5080 5189 #if DEBUG_MEMC_XRAM_RSP 5081 if(m_debug) 5082 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 5083 << " Victim line is in ncc to cc mecanism" 5084 << " / nline = " << std::hex << (victim.tag * m_sets + set) 5085 << std::endl; 5086 #endif 5087 } 5088 else if( not r_xram_rsp_trt_buf.rerror ) 5089 { 5090 r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 5190 if (m_debug) 5191 { 5192 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 5193 << " Get acces to IVT / no pending inval request" << std::endl; 5194 } 5195 #endif 5196 } 5197 } 5198 break; 5199 } 5200 ///////////////////////// 5201 case XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 5202 { 5203 5204 #if DEBUG_MEMC_XRAM_RSP 5205 if (m_debug) 5206 { 5207 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 5208 << " Release all locks and retry" << std::endl; 5209 } 5210 #endif 5211 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 5212 break; 5213 } 5214 /////////////////////// 5215 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 5216 // erases the TRT entry if victim not dirty, 5217 // and set inval request in IVT if required 5218 { 5219 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5220 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 5221 5222 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 5223 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 5224 5225 assert((r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 5226 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 5227 5228 // check if this is an instruction read, this means pktid is either 5229 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 5230 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 5231 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 5232 5233 // check if this is a cached read, this means pktid is either 5234 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 5235 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 5236 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 5237 5238 bool dirty = false; 5239 5240 // update cache data 5241 size_t set = r_xram_rsp_victim_set.read(); 5242 size_t way = r_xram_rsp_victim_way.read(); 5243 5244 for (size_t word = 0; word < m_words; word++) 5245 { 5246 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 5247 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 5248 } 5249 5250 // update cache directory 5251 DirectoryEntry entry; 5252 entry.valid = true; 5253 entry.is_cnt = false; 5254 entry.lock = false; 5255 entry.dirty = dirty; 5256 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 5257 entry.ptr = 0; 5258 entry.cache_coherent = (inst_read or (!cached_read)) and (r_xram_rsp_trt_buf.proc_read); 5259 5260 if (cached_read) 5261 { 5262 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 5263 entry.owner.inst = inst_read; 5264 entry.count = 1; 5265 } 5266 else 5267 { 5268 entry.owner.srcid = 0; 5269 entry.owner.inst = 0; 5270 entry.count = 0; 5271 } 5272 m_cache_directory.write(set, way, entry); 5273 //RWT: keep the coherence information in order 
to send it to the read_rsp 5274 r_xram_rsp_coherent = inst_read or (!cached_read); 5275 // request an invalidattion request in IVT for victim line 5276 if (r_xram_rsp_victim_inval.read()) 5277 { 5278 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 5279 size_t index = 0; 5280 size_t count_copies = r_xram_rsp_victim_count.read(); 5281 5282 bool wok = m_ivt.set(false, // it's an inval transaction 5283 broadcast, // set broadcast bit 5284 false, // no response required 5285 false, // no acknowledge required 5286 0, // srcid 5287 0, // trdid 5288 0, // pktid 5289 r_xram_rsp_victim_nline.read(), 5290 count_copies, 5291 index); 5292 5293 r_xram_rsp_ivt_index = index; 5294 assert(wok and 5295 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 5296 5297 } 5298 if (!r_xram_rsp_victim_coherent.read()) 5299 { 5300 addr_t min = r_xram_rsp_victim_nline.read() * m_words * 4; 5301 addr_t max = r_xram_rsp_victim_nline.read() * m_words * 4 + (m_words - 1) * 4; 5302 m_llsc_table.sw(min, max); 5303 } 5304 #if DEBUG_MEMC_XRAM_RSP 5305 if (m_debug) 5306 { 5307 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 5308 << " Cache update: " 5309 << " way = " << std::dec << way 5310 << " / set = " << set 5311 << " / owner_id = " << std::hex << entry.owner.srcid 5312 << " / owner_ins = " << std::dec << entry.owner.inst 5313 << " / count = " << entry.count 5314 << " / nline = " << r_xram_rsp_trt_buf.nline 5315 << " / is_cnt = " << entry.is_cnt << std::endl; 5316 if (r_xram_rsp_victim_inval.read()) 5317 { 5318 std::cout << " Invalidation request for address " 5319 << std::hex << r_xram_rsp_victim_nline.read() * m_words * 4 5320 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 5321 } 5322 } 5323 #endif 5324 5325 // If the victim is not dirty (RWT: if it is not coherent, 5326 // we can not know wether it is dirty or not), we don't 5327 // need another XRAM put transaction, and we can erase the 5328 // TRT entry 5329 if (!r_xram_rsp_victim_dirty.read() and 5330 (r_xram_rsp_victim_coherent.read() or (r_xram_rsp_victim_count.read() == 0))) 5331 { 5332 m_trt.erase(r_xram_rsp_trt_index.read()); 5333 } 5334 5335 // Next state 5336 if (r_xram_rsp_victim_dirty.read() or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 5337 else if (r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 5338 else if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 5339 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5340 break; 5341 } 5342 //////////////////////// 5343 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty or not coherent (RWT) 5344 { 5345 if (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 5346 { 5347 std::vector<data_t> data_vector; 5348 data_vector.clear(); 5349 for (size_t i = 0; i < m_words; i++) 5350 { 5351 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 5352 } 5353 m_trt.set(r_xram_rsp_trt_index.read(), 5354 false, // PUT 5355 r_xram_rsp_victim_nline.read(), // line index 5356 0, // unused 5357 0, // unused 5358 0, // unused 5359 false, // not proc_read 5360 0, // unused 5361 0, // unused 5362 std::vector<be_t>(m_words,0xF), 5363 data_vector); 5364 5365 #if DEBUG_MEMC_XRAM_RSP 5366 if (m_debug) 5367 { 5368 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 5369 << " Set TRT entry for the put transaction" 5370 << " / address = " << (r_xram_rsp_victim_nline.read() * m_words * 4) << std::endl; 5371 } 5372 #endif 5373 if (r_xram_rsp_trt_buf.proc_read) 
r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 5374 else if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 5375 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 5376 } 5377 break; 5378 } 5379 ////////////////////// 5380 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 5381 { 5382 if (not r_xram_rsp_to_tgt_rsp_req.read()) 5383 { 5384 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 5385 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 5386 if (r_xram_rsp_coherent.read()) 5387 { 5388 r_xram_rsp_to_tgt_rsp_pktid = 0x0 + r_xram_rsp_trt_buf.pktid; //RWT CC 5091 5389 } 5092 5390 else 5093 5391 { 5094 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 5095 } 5392 r_xram_rsp_to_tgt_rsp_pktid = 0x8 + r_xram_rsp_trt_buf.pktid; //RWT NCC 5393 } 5394 for (size_t i = 0; i < m_words; i++) 5395 { 5396 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 5397 } 5398 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 5399 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 5400 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 5401 r_xram_rsp_to_tgt_rsp_rerror = false; 5402 r_xram_rsp_to_tgt_rsp_req = true; 5403 5404 if (r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 5405 else if (r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 5406 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5096 5407 5097 5408 #if DEBUG_MEMC_XRAM_RSP 5098 if(m_debug) 5099 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 5100 << " Select a victim slot: " 5101 << " way = " << std::dec << way 5102 << " / set = " << set 5103 << "/ count = " << victim.count 5104 << " / inval_required = " << inval << std::endl; 5105 #endif 5106 break; 5107 } 5108 /////////////////////// 5109 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 5110 // to check a possible pending inval 5111 { 5112 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5113 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 5114 5115 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 5116 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 5117 5118 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 5119 { 5120 size_t index = 0; 5121 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 5409 if (m_debug) 5410 { 5411 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 5412 << " Request the TGT_RSP FSM to return data:" 5413 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 5414 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline * m_words * 4 5415 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 5416 } 5417 #endif 5418 } 5419 break; 5420 } 5421 //////////////////// 5422 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 5423 { 5424 if (!r_xram_rsp_to_cc_send_multi_req.read() and 5425 !r_xram_rsp_to_cc_send_brdcast_req.read()) 5426 { 5427 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 5428 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 5429 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 5430 5431 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 5432 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 5433 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 5434 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 5435 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 5436 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 5437 
xram_rsp_to_cc_send_fifo_put = multi_req; 5438 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 5439 5440 if (r_xram_rsp_victim_dirty and r_xram_rsp_victim_coherent) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 5441 else if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 5442 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5443 5444 #if DEBUG_MEMC_XRAM_RSP 5445 if (m_debug) 5446 { 5447 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 5448 << " Send an inval request to CC_SEND FSM" 5449 << " / address = " << r_xram_rsp_victim_nline.read() * m_words * 4 << std::endl; 5450 } 5451 #endif 5452 } 5453 break; 5454 } 5455 ////////////////////////// 5456 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 5457 { 5458 if (not r_xram_rsp_to_ixr_cmd_req.read()) 5459 { 5460 r_xram_rsp_to_ixr_cmd_req = true; 5461 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 5462 5463 m_cpt_write_dirty++; 5464 5465 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 5466 r_xram_rsp_victim_inval.read(); 5467 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 5468 5469 if (not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 5470 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5471 5472 #if DEBUG_MEMC_XRAM_RSP 5473 if (m_debug) 5474 { 5475 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 5476 << " Send the put request to IXR_CMD FSM" 5477 << " / address = " << r_xram_rsp_victim_nline.read() * m_words * 4 << std::endl; 5478 } 5479 #endif 5480 } 5481 break; 5482 } 5483 ///////////////////////// 5484 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 5485 { 5486 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 5487 { 5488 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5489 } 5490 5491 #if DEBUG_MEMC_XRAM_RSP 5492 if (m_debug) 5493 { 5494 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 5495 << " Requesting HEAP lock" << std::endl; 5496 } 5497 #endif 5498 break; 5499 } 5500 ///////////////////////// 5501 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 5502 { 5503 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 5504 { 5505 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 5506 5507 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 5508 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 5509 xram_rsp_to_cc_send_fifo_put = true; 5510 if (m_xram_rsp_to_cc_send_inst_fifo.wok()) 5511 { 5512 r_xram_rsp_next_ptr = entry.next; 5513 if (entry.next == r_xram_rsp_next_ptr.read()) // last copy 5122 5514 { 5123 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 5124 5125 #if DEBUG_MEMC_XRAM_RSP 5126 if(m_debug) 5127 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 5128 << " Get acces to IVT, but line invalidation registered" 5129 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 5130 << " / index = " << std::dec << index << std::endl; 5131 #endif 5132 5133 } 5134 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 5135 { 5136 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 5137 5138 #if DEBUG_MEMC_XRAM_RSP 5139 if(m_debug) 5140 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 5141 << " Get acces to IVT, but inval required and IVT full" << std::endl; 5142 #endif 5143 } 5144 else 5145 { 5146 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 5147 5148 #if DEBUG_MEMC_XRAM_RSP 5149 if(m_debug) 5150 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 5151 << " Get acces to IVT / no pending inval request" << std::endl; 5152 #endif 5153 } 5154 } 5155 break; 5156 } 5157 ///////////////////////// 5158 case 
XRAM_RSP_INVAL_WAIT: // release all locks and returns to DIR_LOCK to retry 5159 { 5160 5161 #if DEBUG_MEMC_XRAM_RSP 5162 if(m_debug) 5163 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL_WAIT>" 5164 << " Release all locks and retry" << std::endl; 5165 #endif 5166 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 5167 break; 5168 } 5169 /////////////////////// 5170 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 5171 // erases the TRT entry if victim not dirty, 5172 // and set inval request in IVT if required 5173 { 5174 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 5175 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 5176 5177 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 5178 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 5179 5180 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 5181 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 5182 5183 // check if this is an instruction read, this means pktid is either 5184 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 5185 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 5186 5187 // check if this is a cached read, this means pktid is either 5188 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 5189 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 5190 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 5191 5192 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 5193 5194 bool dirty = false; 5195 5196 // update cache data 5197 size_t set = r_xram_rsp_victim_set.read(); 5198 size_t way = r_xram_rsp_victim_way.read(); 5199 for(size_t word=0; word<m_words ; word++) 5200 { 5201 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 5202 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 5203 5204 } 5205 5206 // update cache directory 5207 DirectoryEntry entry; 5208 entry.valid = true; 5209 entry.cache_coherent = (inst_read or (not(cached_read))) and (r_xram_rsp_trt_buf.proc_read); 5210 entry.is_cnt = false; 5211 entry.lock = false; 5212 entry.dirty = dirty; 5213 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 5214 entry.ptr = 0; 5215 if(cached_read) 5216 { 5217 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 5218 entry.owner.inst = inst_read; 5219 entry.count = 1; 5220 } 5221 else 5222 { 5223 entry.owner.srcid = 0; 5224 entry.owner.inst = 0; 5225 entry.count = 0; 5226 } 5227 m_cache_directory.write(set, way, entry); 5228 //RWT: keep the coherence information in order to send it to the read_rsp 5229 r_xram_rsp_coherent = inst_read or (not(cached_read)); 5230 // request an invalidattion request in IVT for victim line 5231 if(r_xram_rsp_victim_inval.read()) 5232 { 5233 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 5234 size_t index = 0; 5235 size_t count_copies = r_xram_rsp_victim_count.read(); 5236 5237 bool wok = m_ivt.set(false, // it's an inval transaction 5238 broadcast, // set broadcast bit 5239 false, // no response required 5240 false, // no acknowledge required 5241 0, // srcid 5242 0, // trdid 5243 0, // pktid 5244 r_xram_rsp_victim_nline.read(), 5245 count_copies, 5246 index); 5247 5248 r_xram_rsp_ivt_index = index; 5249 assert( wok and 5250 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 5251 5252 } 5253 if (!r_xram_rsp_victim_coherent.read()) 5254 { 5255 addr_t min = r_xram_rsp_victim_nline.read()*m_words*4 ; 5256 addr_t max = r_xram_rsp_victim_nline.read()*m_words*4 + (m_words - 1)*4; 5257 m_llsc_table.sw(min, max); 5258 } 5259 #if 
DEBUG_MEMC_XRAM_RSP 5260 if(m_debug) 5261 { 5262 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_UPDT>" 5263 << " Cache update: " 5264 << " way = " << std::dec << way 5265 << " / set = " << set 5266 << " / owner_id = " << std::hex << entry.owner.srcid 5267 << " / owner_ins = " << std::dec << entry.owner.inst 5268 << " / count = " << entry.count 5269 << " / nline = " << r_xram_rsp_trt_buf.nline 5270 << " / is_cnt = " << entry.is_cnt << std::endl; 5271 if(r_xram_rsp_victim_inval.read()) 5272 std::cout << " Invalidation request for victim line " 5273 << std::hex << r_xram_rsp_victim_nline.read() 5274 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 5275 } 5276 #endif 5277 5278 // If the victim is not dirty (RWT: if it is not coherent, 5279 // we can not know wether it is dirty or not), we don't 5280 // need another XRAM put transaction, and we can erase the 5281 // TRT entry 5282 if(!r_xram_rsp_victim_dirty.read() and (r_xram_rsp_victim_coherent.read() or (r_xram_rsp_victim_count.read() == 0))) m_trt.erase(r_xram_rsp_trt_index.read()); 5283 5284 // Next state 5285 if(r_xram_rsp_victim_dirty.read() or (!r_xram_rsp_victim_coherent.read() and (r_xram_rsp_victim_count.read() == 1))) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 5286 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 5287 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 5288 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5289 break; 5290 } 5291 //////////////////////// 5292 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write to XRAM) if the victim is dirty or not coherent (RWT) 5293 { 5294 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 5295 { 5296 std::vector<data_t> data_vector; 5297 data_vector.clear(); 5298 for(size_t i=0; i<m_words; i++) 5299 { 5300 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 5301 } 5302 m_trt.set( r_xram_rsp_trt_index.read(), 5303 false, // PUT 5304 r_xram_rsp_victim_nline.read(), // line index 5305 0, // unused 5306 0, // unused 5307 0, // unused 5308 false, // not proc_read 5309 0, // unused 5310 0, // unused 5311 std::vector<be_t>(m_words,0xF), 5312 data_vector); 5313 #if DEBUG_MEMC_XRAM_RSP 5314 if(m_debug) 5315 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 5316 << " Set TRT entry for the put transaction" 5317 << " / dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 5318 #endif 5319 5320 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 5321 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 5322 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 5323 } 5324 break; 5325 } 5326 ////////////////////// 5327 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 5328 { 5329 if( not r_xram_rsp_to_tgt_rsp_req.read()) 5330 { 5331 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 5332 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 5333 if (r_xram_rsp_coherent.read()) 5334 { 5335 r_xram_rsp_to_tgt_rsp_pktid = 0x0 + r_xram_rsp_trt_buf.pktid;//RWT CC 5336 } 5337 else 5338 { 5339 r_xram_rsp_to_tgt_rsp_pktid = 0x8 + r_xram_rsp_trt_buf.pktid;//RWT NCC 5340 } 5341 for(size_t i=0; i < m_words; i++) 5342 { 5343 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 5344 } 5345 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 5346 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 5347 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 5348 r_xram_rsp_to_tgt_rsp_rerror = false; 5349 r_xram_rsp_to_tgt_rsp_req = true; 5350 5351 5352 
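// NOTE (illustration only, not part of the changeset): in the RWT protocol the
// read response pktid carries the coherence mode back to the L1 cache: bit 3
// (0x8) is added when the line is returned in NCC (non coherent) mode and left
// clear for CC, as in XRAM_RSP_DIR_RSP. Minimal illustration (rsp_pktid is an
// illustrative helper name):

#include <cstdint>

inline uint32_t rsp_pktid(uint32_t trt_pktid, bool coherent)
{
    return coherent ? (0x0 + trt_pktid)   // CC  : pktid unchanged
                    : (0x8 + trt_pktid);  // NCC : bit 3 set
}
// end of illustrative aside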
if(r_xram_rsp_victim_inval) r_xram_rsp_fsm = XRAM_RSP_INVAL; 5353 else if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 5354 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5355 5356 #if DEBUG_MEMC_XRAM_RSP 5357 if(m_debug) 5358 std::cout << " <MEMC " << name() << " XRAM_RSP_DIR_RSP>" 5359 << " Request the TGT_RSP FSM to return data:" 5360 << " rsrcid = " << std::hex << r_xram_rsp_trt_buf.srcid 5361 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 5362 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 5363 #endif 5364 } 5365 break; 5366 } 5367 //////////////////// 5368 case XRAM_RSP_INVAL: // send invalidate request to CC_SEND FSM 5369 { 5370 if(!r_xram_rsp_to_cc_send_multi_req.read() and 5371 !r_xram_rsp_to_cc_send_brdcast_req.read()) 5372 { 5373 bool multi_req = !r_xram_rsp_victim_is_cnt.read(); 5374 bool last_multi_req = multi_req and (r_xram_rsp_victim_count.read() == 1); 5375 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 5376 5377 r_xram_rsp_to_cc_send_multi_req = last_multi_req; 5378 r_xram_rsp_to_cc_send_brdcast_req = r_xram_rsp_victim_is_cnt.read(); 5379 r_xram_rsp_to_cc_send_nline = r_xram_rsp_victim_nline.read(); 5380 r_xram_rsp_to_cc_send_trdid = r_xram_rsp_ivt_index; 5381 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 5382 xram_rsp_to_cc_send_fifo_inst = r_xram_rsp_victim_copy_inst.read(); 5383 xram_rsp_to_cc_send_fifo_put = multi_req; 5384 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 5385 5386 if(r_xram_rsp_victim_dirty and r_xram_rsp_victim_coherent) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 5387 else if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 5388 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5389 5390 #if DEBUG_MEMC_XRAM_RSP 5391 if(m_debug) 5392 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 5393 << " Send an inval request to CC_SEND FSM" 5394 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 5395 #endif 5396 } 5397 break; 5398 } 5399 ////////////////////////// 5400 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 5401 { 5402 if ( not r_xram_rsp_to_ixr_cmd_req.read() ) 5403 { 5404 r_xram_rsp_to_ixr_cmd_req = true; 5405 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 5406 5407 m_cpt_write_dirty++; 5408 5409 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 5410 r_xram_rsp_victim_inval.read(); 5411 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 5412 5413 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 5414 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 5415 5416 #if DEBUG_MEMC_XRAM_RSP 5417 if(m_debug) 5418 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 5419 << " Send the put request to IXR_CMD FSM" 5420 << " / victim line = " << r_xram_rsp_victim_nline.read() << std::endl; 5421 #endif 5422 } 5423 break; 5424 } 5425 ///////////////////////// 5426 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 5427 { 5428 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 5429 { 5430 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5431 } 5432 5433 #if DEBUG_MEMC_XRAM_RSP 5434 if(m_debug) 5435 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_REQ>" 5436 << " Requesting HEAP lock" << std::endl; 5437 #endif 5438 break; 5439 } 5440 ///////////////////////// 5441 case XRAM_RSP_HEAP_ERASE: // erase the copies and send invalidations 5442 { 5443 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 5444 { 5445 HeapEntry entry = m_heap.read(r_xram_rsp_next_ptr.read()); 5446 5447 
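// NOTE (illustration only, not part of the changeset): XRAM_RSP_HEAP_ERASE walks
// the list of extra sharers chained in the heap; the last entry points to
// itself, which is how the FSM detects the last copy before moving to
// XRAM_RSP_HEAP_LAST. Minimal standalone sketch of the traversal
// (SketchHeapEntry and send_inval are illustrative names):

#include <cstddef>

struct SketchHeapEntry { size_t srcid; bool inst; size_t next; };

inline void walk_copies(const SketchHeapEntry heap[], size_t head,
                        void (*send_inval)(size_t srcid, bool inst))
{
    size_t ptr = head;
    while (true)
    {
        send_inval(heap[ptr].srcid, heap[ptr].inst);  // one invalidation per registered copy
        if (heap[ptr].next == ptr) break;             // self-pointing tail: last copy
        ptr = heap[ptr].next;
    }
}
// end of illustrative aside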
xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 5448 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 5449 xram_rsp_to_cc_send_fifo_put = true; 5450 if(m_xram_rsp_to_cc_send_inst_fifo.wok()) 5451 { 5452 r_xram_rsp_next_ptr = entry.next; 5453 if(entry.next == r_xram_rsp_next_ptr.read()) // last copy 5454 { 5455 r_xram_rsp_to_cc_send_multi_req = true; 5456 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 5457 } 5458 else 5459 { 5460 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5461 5462 } 5515 r_xram_rsp_to_cc_send_multi_req = true; 5516 r_xram_rsp_fsm = XRAM_RSP_HEAP_LAST; 5463 5517 } 5464 5518 else … … 5466 5520 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5467 5521 } 5468 5469 #if DEBUG_MEMC_XRAM_RSP5470 if(m_debug)5471 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>"5472 << " Erase copy:"5473 << " srcid = " << std::hex << entry.owner.srcid5474 << " / inst = " << std::dec << entry.owner.inst << std::endl;5475 #endif5476 }5477 break;5478 }5479 /////////////////////////5480 case XRAM_RSP_HEAP_LAST: // last copy5481 {5482 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP)5483 {5484 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST"5485 << " bad HEAP allocation" << std::endl;5486 exit(0);5487 }5488 size_t free_pointer = m_heap.next_free_ptr();5489 5490 HeapEntry last_entry;5491 last_entry.owner.srcid = 0;5492 last_entry.owner.inst = false;5493 if(m_heap.is_full())5494 {5495 last_entry.next = r_xram_rsp_next_ptr.read();5496 m_heap.unset_full();5497 5522 } 5498 5523 else 5499 5524 { 5500 last_entry.next = free_pointer; 5501 } 5502 5503 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 5504 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 5505 5506 // <Activity counters> 5507 m_cpt_heap_slot_available = m_cpt_heap_slot_available + (r_xram_rsp_victim_count.read() - 1); 5508 // </Activity counters> 5509 5510 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5511 5512 #if DEBUG_MEMC_XRAM_RSP 5513 if(m_debug) 5514 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 5515 << " Heap housekeeping" << std::endl; 5516 #endif 5517 break; 5518 } 5519 ///////////////////////// 5520 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 5521 { 5522 m_trt.erase(r_xram_rsp_trt_index.read()); 5523 5524 // Next state 5525 if (r_xram_rsp_trt_buf.proc_read) 5526 { 5527 r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 5528 } 5529 else 5530 { 5531 // Trigger an interruption to signal a bus error from 5532 // the XRAM because a processor WRITE MISS (XRAM GET 5533 // transaction and not processor read). 5534 // 5535 // To avoid deadlocks we do not wait an error to be 5536 // acknowledged before signaling another one. 
5537 // Therefore, when there is an active error, and other 5538 // errors arrive, these are not considered 5539 5540 if (!r_xram_rsp_rerror_irq.read() && r_xram_rsp_rerror_irq_enable.read() 5541 && r_xram_rsp_trt_buf.xram_read ) 5542 { 5543 r_xram_rsp_rerror_irq = true; 5544 r_xram_rsp_rerror_address = r_xram_rsp_trt_buf.nline * m_words * 4; 5545 r_xram_rsp_rerror_rsrcid = r_xram_rsp_trt_buf.srcid; 5546 5547 #if DEBUG_MEMC_XRAM_RSP 5548 if (m_debug) 5549 std::cout 5550 << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 5551 << " Triggering interrupt to signal WRITE MISS bus error" 5552 << " / irq_enable = " << r_xram_rsp_rerror_irq_enable.read() 5553 << " / nline = " << r_xram_rsp_trt_buf.nline 5554 << " / rsrcid = " << r_xram_rsp_trt_buf.srcid 5555 << std::endl; 5556 #endif 5557 } 5558 5559 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5525 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 5560 5526 } 5561 5527 5562 5528 #if DEBUG_MEMC_XRAM_RSP 5563 5529 if (m_debug) 5564 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 5565 << " Error reported by XRAM / erase the TRT entry" << std::endl; 5566 #endif 5567 break; 5568 } 5569 //////////////////////// 5530 { 5531 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_ERASE>" 5532 << " Erase copy:" 5533 << " srcid = " << std::hex << entry.owner.srcid 5534 << " / inst = " << std::dec << entry.owner.inst << std::endl; 5535 } 5536 #endif 5537 } 5538 break; 5539 } 5540 ///////////////////////// 5541 case XRAM_RSP_HEAP_LAST: // last copy 5542 { 5543 if (r_alloc_heap_fsm.read() != ALLOC_HEAP_XRAM_RSP) 5544 { 5545 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_HEAP_LAST" 5546 << " bad HEAP allocation" << std::endl; 5547 exit(0); 5548 } 5549 size_t free_pointer = m_heap.next_free_ptr(); 5550 5551 HeapEntry last_entry; 5552 last_entry.owner.srcid = 0; 5553 last_entry.owner.inst = false; 5554 if (m_heap.is_full()) 5555 { 5556 last_entry.next = r_xram_rsp_next_ptr.read(); 5557 m_heap.unset_full(); 5558 } 5559 else 5560 { 5561 last_entry.next = free_pointer; 5562 } 5563 5564 m_heap.write_free_ptr(r_xram_rsp_victim_ptr.read()); 5565 m_heap.write(r_xram_rsp_next_ptr.read(),last_entry); 5566 5567 // <Activity counters> 5568 m_cpt_heap_slot_available = m_cpt_heap_slot_available + (r_xram_rsp_victim_count.read() - 1); 5569 // </Activity counters> 5570 5571 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5572 5573 #if DEBUG_MEMC_XRAM_RSP 5574 if (m_debug) 5575 { 5576 std::cout << " <MEMC " << name() << " XRAM_RSP_HEAP_LAST>" 5577 << " Heap housekeeping" << std::endl; 5578 } 5579 #endif 5580 break; 5581 } 5582 ////////////////////////// 5583 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 5584 { 5585 m_trt.erase(r_xram_rsp_trt_index.read()); 5586 5587 // Next state 5588 if (r_xram_rsp_trt_buf.proc_read) 5589 { 5590 r_xram_rsp_fsm = XRAM_RSP_ERROR_RSP; 5591 } 5592 else 5593 { 5594 // Trigger an interruption to signal a bus error from 5595 // the XRAM because a processor WRITE MISS (XRAM GET 5596 // transaction and not processor read). 5597 // 5598 // To avoid deadlocks we do not wait an error to be 5599 // acknowledged before signaling another one. 
5600 // Therefore, when there is an active error, and other 5601 // errors arrive, these are not considered 5602 5603 if (!r_xram_rsp_rerror_irq.read() and r_xram_rsp_rerror_irq_enable.read() 5604 and r_xram_rsp_trt_buf.xram_read) 5605 { 5606 r_xram_rsp_rerror_irq = true; 5607 r_xram_rsp_rerror_address = r_xram_rsp_trt_buf.nline * m_words * 4; 5608 r_xram_rsp_rerror_rsrcid = r_xram_rsp_trt_buf.srcid; 5609 5610 #if DEBUG_MEMC_XRAM_RSP 5611 if (m_debug) 5612 { 5613 std::cout 5614 << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 5615 << " Triggering interrupt to signal WRITE MISS bus error" 5616 << " / irq_enable = " << r_xram_rsp_rerror_irq_enable.read() 5617 << " / nline = " << r_xram_rsp_trt_buf.nline 5618 << " / rsrcid = " << r_xram_rsp_trt_buf.srcid 5619 << std::endl; 5620 } 5621 #endif 5622 } 5623 5624 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5625 } 5626 5627 #if DEBUG_MEMC_XRAM_RSP 5628 if (m_debug) 5629 { 5630 std::cout << " <MEMC " << name() << " XRAM_RSP_ERROR_ERASE>" 5631 << " Error reported by XRAM / erase the TRT entry" << std::endl; 5632 } 5633 #endif 5634 break; 5635 } 5636 //////////////////////// 5570 5637 case XRAM_RSP_ERROR_RSP: // Request an error response to TGT_RSP FSM 5571 5572 if(!r_xram_rsp_to_tgt_rsp_req.read())5573 5574 5575 5576 5577 for(size_t i=0; i < m_words; i++)5578 5579 5580 5581 5582 5583 5584 5585 5586 5638 { 5639 if (!r_xram_rsp_to_tgt_rsp_req.read()) 5640 { 5641 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 5642 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 5643 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 5644 for (size_t i = 0; i < m_words; i++) 5645 { 5646 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 5647 } 5648 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 5649 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 5650 r_xram_rsp_to_tgt_rsp_rerror = true; 5651 r_xram_rsp_to_tgt_rsp_req = true; 5652 5653 r_xram_rsp_fsm = XRAM_RSP_IDLE; 5587 5654 5588 5655 #if DEBUG_MEMC_XRAM_RSP 5589 if(m_debug) 5590 std::cout << " <MEMC " << name() 5591 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 5592 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 5593 #endif 5594 } 5595 break; 5596 } 5656 if (m_debug) 5657 { 5658 std::cout << " <MEMC " << name() 5659 << " XRAM_RSP_ERROR_RSP> Request a response error to TGT_RSP FSM:" 5660 << " srcid = " << std::dec << r_xram_rsp_trt_buf.srcid << std::endl; 5661 } 5662 #endif 5663 } 5664 break; 5665 } 5597 5666 } // end swich r_xram_rsp_fsm 5598 5667 … … 5607 5676 { 5608 5677 ////////////////// 5609 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 5610 { 5611 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 5612 5678 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 5679 { 5680 if (not m_cc_receive_to_cleanup_fifo.rok()) break; 5681 5682 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5683 5684 uint32_t srcid = 5685 DspinRwtParam::dspin_get( 5686 flit, 5687 DspinRwtParam::CLEANUP_SRCID); 5688 5689 uint8_t type = 5690 DspinRwtParam::dspin_get( 5691 flit, 5692 DspinRwtParam::P2M_TYPE); 5693 5694 r_cleanup_way_index = 5695 DspinRwtParam::dspin_get( 5696 flit, 5697 DspinRwtParam::CLEANUP_WAY_INDEX); 5698 5699 r_cleanup_nline = 5700 DspinRwtParam::dspin_get( 5701 flit, 5702 DspinRwtParam::CLEANUP_NLINE_MSB) << 32; 5703 5704 r_cleanup_inst = (type == DspinRwtParam::TYPE_CLEANUP_INST); 5705 r_cleanup_srcid = srcid; 5706 r_cleanup_ncc = 5707 DspinRwtParam::dspin_get( 5708 flit, 5709 
DspinRwtParam::CLEANUP_NCC); 5710 r_cleanup_contains_data = false; 5711 5712 assert((srcid < m_initiators) and 5713 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 5714 5715 cc_receive_to_cleanup_fifo_get = true; 5716 r_cleanup_fsm = CLEANUP_GET_NLINE; 5717 5718 #if DEBUG_MEMC_CLEANUP 5719 if (m_debug) 5720 { 5721 std::cout << " <MEMC " << name() 5722 << " CLEANUP_IDLE> Cleanup request:" << std::hex 5723 << " owner_id = " << srcid 5724 << " / owner_ins = " << (type == DspinRwtParam::TYPE_CLEANUP_INST) 5725 << " / ncc = " << DspinRwtParam::dspin_get( 5726 flit, 5727 DspinRwtParam::CLEANUP_NCC) 5728 << std::endl; 5729 } 5730 #endif 5731 break; 5732 } 5733 /////////////////////// 5734 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 5735 { 5736 if (not m_cc_receive_to_cleanup_fifo.rok()) break; 5737 5738 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5739 uint32_t srcid = r_cleanup_srcid.read(); 5740 5741 addr_t nline = r_cleanup_nline.read() | 5742 DspinRwtParam::dspin_get(flit, DspinRwtParam::CLEANUP_NLINE_LSB); 5743 5744 bool eop = DspinRwtParam::dspin_get(flit, DspinRwtParam::P2M_EOP) == 0x1; 5745 5746 if (!eop) 5747 { 5748 r_cleanup_fsm = CLEANUP_GET_DATA; 5749 r_cleanup_data_index = 0; 5750 r_cleanup_contains_data = true; 5751 // <Activity Counters> 5752 if (is_local_req(srcid)) { 5753 m_cpt_cleanup_local++; 5754 m_cpt_cleanup_data_local++; 5755 } 5756 else { 5757 m_cpt_cleanup_remote++; 5758 m_cpt_cleanup_data_remote++; 5759 } 5760 // 2 + m_words flits for cleanup with data 5761 m_cpt_cleanup_cost += (m_words + 2) * req_distance(srcid); 5762 m_cpt_cleanup_data_cost += (m_words + 2) * req_distance(srcid); 5763 // </Activity Counters> 5764 } 5765 else 5766 { 5767 r_cleanup_fsm = CLEANUP_DIR_REQ; 5768 // <Activity Counters> 5769 if (is_local_req(srcid)) { 5770 m_cpt_cleanup_local++; 5771 } 5772 else { 5773 m_cpt_cleanup_remote++; 5774 } 5775 // 2 flits for cleanup without data 5776 m_cpt_cleanup_cost += 2 * req_distance(srcid); 5777 // </Activity Counters> 5778 } 5779 cc_receive_to_cleanup_fifo_get = true; 5780 r_cleanup_nline = nline; 5781 5782 #if DEBUG_MEMC_CLEANUP 5783 if (m_debug) 5784 { 5785 std::cout << " <MEMC " << name() 5786 << " CLEANUP_GET_NLINE> Cleanup request:" 5787 << " address = " << std::hex << nline * m_words * 4 5788 << " / contains data = " << (!eop) 5789 << std::dec << std::endl; 5790 } 5791 #endif 5792 break; 5793 } 5794 ///////////////////// 5795 case CLEANUP_GET_DATA: 5796 { 5797 if (m_cc_receive_to_cleanup_fifo.rok()) 5798 { 5613 5799 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5614 5800 5615 uint32_t srcid = 5616 DspinRwtParam::dspin_get( 5617 flit, 5618 DspinRwtParam::CLEANUP_SRCID); 5619 5620 uint8_t type = 5621 DspinRwtParam::dspin_get( 5622 flit, 5623 DspinRwtParam::P2M_TYPE); 5624 5625 r_cleanup_way_index = 5626 DspinRwtParam::dspin_get( 5627 flit, 5628 DspinRwtParam::CLEANUP_WAY_INDEX); 5629 5630 r_cleanup_nline = 5631 DspinRwtParam::dspin_get( 5632 flit, 5633 DspinRwtParam::CLEANUP_NLINE_MSB) << 32; 5634 5635 r_cleanup_inst = (type == DspinRwtParam::TYPE_CLEANUP_INST); 5636 r_cleanup_srcid = srcid; 5637 r_cleanup_ncc = 5638 DspinRwtParam::dspin_get( 5639 flit, 5640 DspinRwtParam::CLEANUP_NCC); 5641 r_cleanup_contains_data = false; 5642 5643 assert( (srcid < m_initiators) and 5644 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 5645 5801 uint32_t data = 5802 DspinRwtParam::dspin_get(flit, DspinRwtParam::CLEANUP_DATA_UPDT); 5803 5804 r_cleanup_data[r_cleanup_data_index] = data; 5805 
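// A compact sketch of how a CLEANUP command is decoded from its DSPIN flits,
// as done in CLEANUP_IDLE, CLEANUP_GET_NLINE and CLEANUP_GET_DATA: the first
// flit carries srcid, type, way index, NCC bit and the nline MSB, the second
// carries the nline LSB and the EOP bit, and a dirty L1 copy appends m_words
// data flits. get_field() stands in for DspinRwtParam::dspin_get(); the field
// offsets used here are placeholders, not the real DSPIN layout.
#include <cstddef>
#include <cstdint>
#include <vector>

static inline uint64_t get_field(uint64_t flit, unsigned lsb, unsigned width)
{
    return (flit >> lsb) & ((uint64_t(1) << width) - 1);
}

struct CleanupCmd
{
    uint32_t srcid;
    bool     inst;                 // cleanup of an instruction-cache copy
    bool     ncc;                  // line handled with the NCC policy
    uint64_t nline;                // line index (address = nline * m_words * 4)
    std::vector<uint32_t> data;    // present only when the L1 copy was dirty
};

CleanupCmd decode_cleanup(const std::vector<uint64_t>& flits, std::size_t m_words)
{
    CleanupCmd cmd;
    cmd.srcid  = get_field(flits[0],  0, 14);         // placeholder offsets
    cmd.inst   = get_field(flits[0], 14,  4) == 0x1;  // TYPE_CLEANUP_INST
    cmd.ncc    = get_field(flits[0], 18,  1);
    cmd.nline  = get_field(flits[0], 19,  8) << 32;   // CLEANUP_NLINE_MSB

    cmd.nline |= get_field(flits[1],  0, 32);         // CLEANUP_NLINE_LSB
    bool eop   = get_field(flits[1], 32,  1) == 0x1;  // P2M_EOP

    if (not eop)                                      // dirty copy: data flits
        for (std::size_t w = 0; w < m_words; w++)
            cmd.data.push_back(get_field(flits[2 + w], 0, 32));
    return cmd;
}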
r_cleanup_data_index = r_cleanup_data_index.read() + 1; 5806 assert(r_cleanup_data_index.read() < m_words and "MEM_CACHE in CLEANUP_GET_DATA : too much flits in cleanup data updt"); 5646 5807 cc_receive_to_cleanup_fifo_get = true; 5647 r_cleanup_fsm = CLEANUP_GET_NLINE; 5648 5808 if (r_cleanup_data_index.read() == (m_words - 1)) 5809 { 5810 r_cleanup_contains_data = true; 5811 r_cleanup_fsm = CLEANUP_DIR_REQ; 5812 } 5649 5813 #if DEBUG_MEMC_CLEANUP 5650 if(m_debug) 5814 if (m_debug) 5815 { 5816 std::cout << " <MEMC " << name() 5817 << " CLEANUP_GET_DATA> " 5818 << " / word = " << std::dec << r_cleanup_data_index.read() 5819 << " / data = " << std::hex << data 5820 << std::dec << std::endl; 5821 } 5822 #endif 5823 } 5824 break; 5825 } 5826 ///////////////////// 5827 case CLEANUP_DIR_REQ: // Get the lock to the directory 5828 { 5829 if (r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 5830 5831 r_cleanup_fsm = CLEANUP_DIR_LOCK; 5832 5833 #if DEBUG_MEMC_CLEANUP 5834 if (m_debug) 5835 { 5836 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 5837 } 5838 #endif 5839 break; 5840 } 5841 ////////////////////// 5842 case CLEANUP_DIR_LOCK: // test directory status 5843 { 5844 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 5845 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 5846 5847 // Read the directory 5848 size_t way = 0; 5849 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 5850 5851 DirectoryEntry entry = m_cache_directory.read(cleanup_address, way); 5852 r_cleanup_is_cnt = entry.is_cnt; 5853 r_cleanup_dirty = entry.dirty; 5854 r_cleanup_tag = entry.tag; 5855 r_cleanup_lock = entry.lock; 5856 r_cleanup_way = way; 5857 r_cleanup_count = entry.count; 5858 r_cleanup_ptr = entry.ptr; 5859 r_cleanup_copy = entry.owner.srcid; 5860 r_cleanup_copy_inst = entry.owner.inst; 5861 5862 //RWT 5863 size_t set = m_y[(addr_t) (cleanup_address)]; 5864 m_cache_data.read_line(way, set, r_cleanup_old_data); 5865 r_cleanup_coherent = entry.cache_coherent; 5866 5867 if (entry.valid) // hit : the copy must be cleared 5868 { 5869 assert((entry.count > 0) and 5870 "MEMC ERROR in CLEANUP_DIR_LOCK state, CLEANUP on valid entry with no copies"); 5871 5872 if ((entry.count == 1) or (entry.is_cnt)) // no access to the heap 5873 { 5874 r_cleanup_fsm = CLEANUP_DIR_WRITE; 5875 } 5876 else // access to the heap 5877 { 5878 r_cleanup_fsm = CLEANUP_HEAP_REQ; 5879 } 5880 } 5881 else // miss : check IVT for a pending inval 5882 { 5883 r_cleanup_fsm = CLEANUP_IVT_LOCK; 5884 } 5885 5886 #if DEBUG_MEMC_CLEANUP 5887 if (m_debug) 5888 { 5889 std::cout << " <MEMC " << name() 5890 << " CLEANUP_DIR_LOCK> Test directory status: " 5891 << std::hex 5892 << " address = " << cleanup_address 5893 << " / hit = " << entry.valid 5894 << " / dir_id = " << entry.owner.srcid 5895 << " / dir_ins = " << entry.owner.inst 5896 << " / search_id = " << r_cleanup_srcid.read() 5897 << " / search_ins = " << r_cleanup_inst.read() 5898 << " / count = " << entry.count 5899 << " / is_cnt = " << entry.is_cnt 5900 << std::dec << std::endl; 5901 } 5902 #endif 5903 break; 5904 } 5905 /////////////////////// 5906 case CLEANUP_DIR_WRITE: // Update the directory entry without heap access 5907 { 5908 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 5909 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 5910 5911 size_t way = r_cleanup_way.read(); 5912 size_t set = m_y[(addr_t) (r_cleanup_nline.read() * m_words * 4)]; 5913 bool match_srcid = (r_cleanup_copy.read() == 
r_cleanup_srcid.read()); 5914 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5915 bool match = match_srcid and match_inst; 5916 5917 assert((r_cleanup_is_cnt.read() or match) and 5918 "MEMC ERROR in CLEANUP_DIR_LOCK: illegal CLEANUP on valid entry"); 5919 5920 /*RWT*/ 5921 // NCC to CC initiated by a read transaction 5922 bool inval_by_read = r_read_to_cleanup_req.read() and 5923 (r_cleanup_nline.read() == r_read_to_cleanup_nline.read()); 5924 5925 // NCC to CC initiated by a write transaction 5926 bool inval_by_write = r_write_to_cleanup_req.read() and 5927 (r_cleanup_nline.read() == r_write_to_cleanup_nline.read()); 5928 5929 bool inval_request = inval_by_read or inval_by_write; 5930 5931 if (inval_by_write) 5932 { 5933 r_write_to_cleanup_req = false; 5934 m_cpt_ncc_to_cc_write ++; 5935 } 5936 5937 // update the cache directory (for the copies) 5938 DirectoryEntry entry; 5939 entry.valid = true; 5940 entry.cache_coherent = inval_request or r_cleanup_coherent.read(); 5941 entry.is_cnt = r_cleanup_is_cnt.read(); 5942 entry.dirty = r_cleanup_dirty.read() or r_cleanup_contains_data.read(); 5943 entry.tag = r_cleanup_tag.read(); 5944 entry.lock = r_cleanup_lock.read(); 5945 entry.ptr = r_cleanup_ptr.read(); 5946 5947 if (inval_by_read) // pending READ 5948 { 5949 if (r_read_to_cleanup_cached_read.read()) 5950 { 5951 entry.count = r_cleanup_count.read(); 5952 entry.owner.srcid = r_read_to_cleanup_srcid.read(); 5953 entry.owner.inst = r_read_to_cleanup_inst.read(); 5954 } 5955 else 5956 { 5957 entry.count = r_cleanup_count.read() - 1; 5958 entry.owner.srcid = r_cleanup_copy.read(); 5959 entry.owner.inst = r_cleanup_copy_inst.read(); 5960 } 5961 if (r_read_to_cleanup_is_ll.read()) 5962 { 5963 r_cleanup_to_tgt_rsp_ll_key = r_read_to_cleanup_ll_key.read(); 5964 } 5965 } 5966 else 5967 { 5968 entry.count = r_cleanup_count.read() - 1; 5969 entry.owner.srcid = 0; 5970 entry.owner.inst = 0; 5971 5972 #if REVERT_CC_MECANISM 5973 // Revert CC to NCC if : 5974 // - no more copy in L1 caches 5975 // - this line is not in counter mode (broadcast) 5976 // - this line is not in NCC to CC mecanism 5977 if (((r_cleanup_count.read() - 1) == 0) and (r_cleanup_is_cnt == false) and (inval_request == false)) 5978 { 5979 entry.cache_coherent = false; 5980 } 5981 #endif 5982 5983 #if REVERT_BC_MECANISM 5984 if ((r_cleanup_count.read() - 1) == 0) 5985 { 5986 entry.is_cnt = false; 5987 } 5988 #endif 5989 5990 } 5991 5992 if (r_cleanup_contains_data.read()) 5993 { 5994 for (size_t word = 0; word < m_words; word++) 5995 { 5996 m_cache_data.write(way, set, word, r_cleanup_data[word].read(), 0xF); 5997 } 5998 addr_t min = r_cleanup_nline.read() * m_words * 4; 5999 addr_t max = r_cleanup_nline.read() * m_words * 4 + (m_words - 1) * 4; 6000 m_llsc_table.sw(min, max); 6001 } 6002 6003 m_cache_directory.write(set, way, entry); 6004 6005 /*RWT*/ 6006 if (inval_request) 6007 { 6008 r_cleanup_fsm = CLEANUP_IVT_LOCK_DATA; 6009 } 6010 else 6011 { 6012 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6013 } 6014 6015 #if DEBUG_MEMC_CLEANUP 6016 if (m_debug) 6017 { 6018 std::cout << " <MEMC " << name() 6019 << " CLEANUP_DIR_WRITE> Update directory:" 6020 << std::hex 6021 << " address = " << r_cleanup_nline.read() * m_words * 4 6022 << " / dir_id = " << entry.owner.srcid 6023 << " / dir_ins = " << entry.owner.inst 6024 << " / count = " << entry.count 6025 << " / is_cnt = " << entry.is_cnt 6026 << " / match_inval = " << inval_request 6027 << " / is_coherent = " << entry.cache_coherent 6028 << std::dec << std::endl; 6029 } 
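// A simplified sketch of the directory update performed in CLEANUP_DIR_WRITE
// (single-copy case, no heap access), assuming only the flags shown above.
// The REVERT_CC_MECANISM / REVERT_BC_MECANISM compile-time options are
// modelled as plain booleans.
#include <cstddef>

struct DirUpdate
{
    bool        coherent;   // cache_coherent bit after the cleanup
    bool        is_cnt;     // counter-mode bit after the cleanup
    bool        dirty;      // dirty bit after the cleanup
    std::size_t count;      // number of remaining copies
};

DirUpdate cleanup_dir_write(std::size_t old_count,
                            bool old_coherent,
                            bool old_is_cnt,
                            bool old_dirty,
                            bool cleanup_has_data,   // L1 copy was dirty
                            bool pending_read,       // NCC->CC via READ
                            bool pending_write,      // NCC->CC via WRITE
                            bool reader_keeps_copy,  // cached read
                            bool revert_cc,          // REVERT_CC_MECANISM
                            bool revert_bc)          // REVERT_BC_MECANISM
{
    bool inval_request = pending_read or pending_write;

    DirUpdate d;
    d.coherent = inval_request or old_coherent;
    d.is_cnt   = old_is_cnt;
    d.dirty    = old_dirty or cleanup_has_data;

    // the cleaned-up copy leaves, but a pending cached read may replace it
    if (pending_read and reader_keeps_copy) d.count = old_count;
    else                                    d.count = old_count - 1;

    if (not pending_read and d.count == 0)
    {
        if (revert_cc and not old_is_cnt and not inval_request)
            d.coherent = false;        // fall back to the NCC policy
        if (revert_bc)
            d.is_cnt = false;          // leave counter (broadcast) mode
    }
    return d;
}
// When the cleanup carried data, the FSM above additionally writes the
// m_words into the data array and invalidates LL/SC reservations on the line.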
6030 #endif 6031 6032 break; 6033 } 6034 ///////////////////// 6035 case CLEANUP_IVT_LOCK_DATA: //RWT 6036 { 6037 // Search for a matching inval in the IVT (there must be one) 6038 // and check if there is a pending read. 6039 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) 6040 { 6041 size_t index = 0; 6042 bool match_inval; 6043 6044 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 6045 assert(match_inval and 6046 "VCI MEM CACHE ERROR: In CLEANUP_IVT_LOCK_DATA, NO CORRESPONDING INVAL"); 6047 6048 r_cleanup_read_srcid = m_ivt.srcid(index); 6049 r_cleanup_read_trdid = m_ivt.trdid(index); 6050 r_cleanup_read_pktid = 0x0 + m_ivt.pktid(index); 6051 r_cleanup_read_need_rsp = !m_ivt.need_rsp(index); 6052 r_cleanup_index = index; 6053 6054 r_cleanup_fsm = CLEANUP_IVT_CLEAR_DATA; 6055 } 6056 #if DEBUG_MEMC_CLEANUP 6057 if (m_debug) 6058 { 6059 std::cout << " <MEMC " << name() 6060 << " CLEANUP_IVT_LOCK_DATA> fetch pending inval" 6061 << std::endl; 6062 } 6063 #endif 6064 break; 6065 } 6066 6067 ////////////////////////// 6068 case CLEANUP_IVT_CLEAR_DATA: //RWT 6069 { 6070 m_ivt.clear(r_cleanup_index.read()); 6071 assert((r_cleanup_read_need_rsp.read() == (r_read_to_cleanup_req.read() && (r_cleanup_nline.read() == r_read_to_cleanup_nline.read()))) && "condition pending read"); 6072 6073 if (r_cleanup_read_need_rsp.read()) 6074 { 6075 r_cleanup_fsm = CLEANUP_READ_RSP; 6076 } 6077 else 6078 { 6079 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6080 } 6081 #if DEBUG_MEMC_CLEANUP 6082 if (m_debug) 6083 { 6084 std::cout << " <MEMC " << name() 6085 << " CLEANUP_IVT_CLEAR_DATA> clear IVT entry" 6086 << std::endl; 6087 } 6088 #endif 6089 break; 6090 } 6091 6092 //////////////////////// 6093 case CLEANUP_READ_RSP: //RWT 6094 { 6095 if (r_cleanup_to_tgt_rsp_req.read()) break; 6096 6097 r_cleanup_to_tgt_rsp_req = true; 6098 r_cleanup_to_tgt_rsp_srcid = r_cleanup_read_srcid.read(); 6099 r_cleanup_to_tgt_rsp_trdid = r_cleanup_read_trdid.read(); 6100 r_cleanup_to_tgt_rsp_pktid = 0x0 + r_cleanup_read_pktid.read(); 6101 r_cleanup_to_tgt_rsp_type = 0; // Read instruction 6102 r_cleanup_to_tgt_rsp_length = r_read_to_cleanup_length.read(); 6103 r_cleanup_to_tgt_rsp_first_word = r_read_to_cleanup_first_word.read(); 6104 r_read_to_cleanup_req = false; 6105 m_cpt_ncc_to_cc_read++; 6106 if (r_cleanup_contains_data.read()) // L1 was dirty 6107 { 6108 for (size_t i = 0; i < m_words; i++) 6109 { 6110 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_data[i].read(); 6111 } 6112 } 6113 else // L2 data are up to date 6114 { 6115 for (size_t i = 0; i < m_words; i++) 6116 { 6117 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_old_data[i].read(); 6118 } 6119 } 6120 6121 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6122 6123 #if DEBUG_MEMC_CLEANUP 6124 if (m_debug) 6125 { 6126 std::cout << " <MEMC " << name() 6127 << " CLEANUP_READ_RSP> answer READ" 6128 << std::endl; 6129 } 6130 #endif 6131 break; 6132 } 6133 ////////////////////// 6134 case CLEANUP_HEAP_REQ: // get the lock to the HEAP directory 6135 { 6136 if (r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 6137 6138 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 6139 6140 #if DEBUG_MEMC_CLEANUP 6141 if (m_debug) 6142 { 6143 std::cout << " <MEMC " << name() 6144 << " CLEANUP_HEAP_REQ> HEAP lock acquired " << std::endl; 6145 } 6146 #endif 6147 break; 6148 } 6149 ////////////////////// 6150 case CLEANUP_HEAP_LOCK: // two cases are handled in this state : 6151 // 1. the matching copy is directly in the directory 6152 // 2. 
the matching copy is the first copy in the heap 6153 { 6154 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6155 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6156 6157 size_t way = r_cleanup_way.read(); 6158 size_t set = m_y[(addr_t) (r_cleanup_nline.read() * m_words * 4)]; 6159 6160 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 6161 bool last = (heap_entry.next == r_cleanup_ptr.read()); 6162 6163 // match_dir computation 6164 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 6165 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 6166 bool match_dir = match_dir_srcid and match_dir_inst; 6167 6168 // match_heap computation 6169 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 6170 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 6171 bool match_heap = match_heap_srcid and match_heap_inst; 6172 6173 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 6174 r_cleanup_prev_srcid = heap_entry.owner.srcid; 6175 r_cleanup_prev_inst = heap_entry.owner.inst; 6176 6177 assert((not last or match_dir or match_heap) and 6178 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 6179 6180 assert((not match_dir or not match_heap) and 6181 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 6182 6183 DirectoryEntry dir_entry; 6184 dir_entry.valid = true; 6185 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 6186 dir_entry.dirty = r_cleanup_dirty.read(); 6187 dir_entry.tag = r_cleanup_tag.read(); 6188 dir_entry.lock = r_cleanup_lock.read(); 6189 dir_entry.count = r_cleanup_count.read() - 1; 6190 dir_entry.cache_coherent = true; 6191 6192 // the matching copy is registered in the directory and 6193 // it must be replaced by the first copy registered in 6194 // the heap. 
The corresponding entry must be freed 6195 if (match_dir) 6196 { 6197 dir_entry.ptr = heap_entry.next; 6198 dir_entry.owner.srcid = heap_entry.owner.srcid; 6199 dir_entry.owner.inst = heap_entry.owner.inst; 6200 r_cleanup_next_ptr = r_cleanup_ptr.read(); 6201 r_cleanup_fsm = CLEANUP_HEAP_FREE; 6202 } 6203 6204 // the matching copy is the first copy in the heap 6205 // It must be freed and the copy registered in directory 6206 // must point to the next copy in heap 6207 else if (match_heap) 6208 { 6209 dir_entry.ptr = heap_entry.next; 6210 dir_entry.owner.srcid = r_cleanup_copy.read(); 6211 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 6212 r_cleanup_next_ptr = r_cleanup_ptr.read(); 6213 r_cleanup_fsm = CLEANUP_HEAP_FREE; 6214 } 6215 6216 // The matching copy is in the heap, but is not the first copy 6217 // The directory entry must be modified to decrement count 6218 else 6219 { 6220 dir_entry.ptr = r_cleanup_ptr.read(); 6221 dir_entry.owner.srcid = r_cleanup_copy.read(); 6222 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 6223 r_cleanup_next_ptr = heap_entry.next; 6224 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 6225 } 6226 6227 m_cache_directory.write(set, way, dir_entry); 6228 6229 #if DEBUG_MEMC_CLEANUP 6230 if (m_debug) 6231 { 6232 std::cout << " <MEMC " << name() 6233 << " CLEANUP_HEAP_LOCK> Checks matching:" 6234 << " address = " << r_cleanup_nline.read() * m_words * 4 6235 << " / dir_id = " << r_cleanup_copy.read() 6236 << " / dir_ins = " << r_cleanup_copy_inst.read() 6237 << " / heap_id = " << heap_entry.owner.srcid 6238 << " / heap_ins = " << heap_entry.owner.inst 6239 << " / search_id = " << r_cleanup_srcid.read() 6240 << " / search_ins = " << r_cleanup_inst.read() << std::endl; 6241 } 6242 #endif 6243 break; 6244 } 6245 //////////////////////// 6246 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 6247 // is in the heap, but is not the first in the linked list 6248 { 6249 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6250 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6251 6252 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 6253 6254 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 6255 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 6256 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 6257 bool match_heap = match_heap_srcid and match_heap_inst; 6258 6259 assert((not last or match_heap) and 6260 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 6261 6262 // the matching copy must be removed 6263 if (match_heap) 6264 { 6265 // re-use ressources 6266 r_cleanup_ptr = heap_entry.next; 6267 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 6268 } 6269 // test the next in the linked list 6270 else 6271 { 6272 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 6273 r_cleanup_prev_srcid = heap_entry.owner.srcid; 6274 r_cleanup_prev_inst = heap_entry.owner.inst; 6275 r_cleanup_next_ptr = heap_entry.next; 6276 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 6277 } 6278 6279 #if DEBUG_MEMC_CLEANUP 6280 if (m_debug) 6281 { 6282 if (not match_heap) 5651 6283 { 5652 6284 std::cout 5653 << " <MEMC " << name() 5654 << " CLEANUP_IDLE> Cleanup request:" << std::hex 5655 << " / owner_id = " << srcid 5656 << " / owner_ins = " << (type == DspinRwtParam::TYPE_CLEANUP_INST) 5657 << " / ncc = " << DspinRwtParam::dspin_get( 5658 flit, 5659 DspinRwtParam::CLEANUP_NCC) 6285 << " <MEMC " << name() 6286 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 5660 6287 << 
std::endl; 5661 6288 } 5662 #endif 5663 break; 5664 } 5665 5666 /////////////////////// 5667 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 5668 { 5669 if (not m_cc_receive_to_cleanup_fifo.rok()) break; 5670 5671 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5672 uint32_t srcid = r_cleanup_srcid.read(); 5673 5674 addr_t nline = r_cleanup_nline.read() | 5675 DspinRwtParam::dspin_get(flit, DspinRwtParam::CLEANUP_NLINE_LSB); 5676 5677 bool eop = DspinRwtParam::dspin_get(flit, DspinRwtParam::P2M_EOP) == 0x1; 5678 5679 if (! eop) 5680 { 5681 r_cleanup_fsm = CLEANUP_GET_DATA; 5682 r_cleanup_data_index = 0; 5683 r_cleanup_contains_data = true; 5684 // <Activity Counters> 5685 if (is_local_req(srcid)) { 5686 m_cpt_cleanup_local++; 5687 m_cpt_cleanup_data_local++; 6289 else 6290 { 6291 std::cout 6292 << " <MEMC " << name() 6293 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 6294 << std::endl; 6295 } 6296 std::cout 6297 << " address = " << r_cleanup_nline.read() * m_words * 4 6298 << " / heap_id = " << heap_entry.owner.srcid 6299 << " / heap_ins = " << heap_entry.owner.inst 6300 << " / search_id = " << r_cleanup_srcid.read() 6301 << " / search_ins = " << r_cleanup_inst.read() 6302 << " / last = " << last 6303 << std::endl; 6304 } 6305 #endif 6306 break; 6307 } 6308 //////////////////////// 6309 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 6310 { 6311 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6312 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6313 6314 HeapEntry heap_entry; 6315 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 6316 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 6317 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 6318 6319 if (last) // this is the last entry of the list of copies 6320 { 6321 heap_entry.next = r_cleanup_prev_ptr.read(); 6322 } 6323 else // this is not the last entry 6324 { 6325 heap_entry.next = r_cleanup_ptr.read(); 6326 } 6327 6328 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 6329 6330 r_cleanup_fsm = CLEANUP_HEAP_FREE; 6331 6332 #if DEBUG_MEMC_CLEANUP 6333 if (m_debug) 6334 { 6335 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 6336 << " Remove the copy in the linked list" << std::endl; 6337 } 6338 #endif 6339 break; 6340 } 6341 /////////////////////// 6342 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 6343 // and becomes the head of the list of free entries 6344 { 6345 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6346 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6347 6348 HeapEntry heap_entry; 6349 heap_entry.owner.srcid = 0; 6350 heap_entry.owner.inst = false; 6351 6352 if (m_heap.is_full()) 6353 { 6354 heap_entry.next = r_cleanup_next_ptr.read(); 6355 } 6356 else 6357 { 6358 heap_entry.next = m_heap.next_free_ptr(); 6359 } 6360 6361 m_heap.write(r_cleanup_next_ptr.read(), heap_entry); 6362 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 6363 m_heap.unset_full(); 6364 6365 // <Activity counters> 6366 m_cpt_heap_slot_available++; 6367 // </Activity counters> 6368 6369 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6370 6371 #if DEBUG_MEMC_CLEANUP 6372 if (m_debug) 6373 { 6374 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 6375 << " Update the list of free entries" << std::endl; 6376 } 6377 #endif 6378 break; 6379 } 6380 ////////////////////// 6381 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 6382 // invalidate transaction matching the 
cleanup 6383 { 6384 if (r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 6385 6386 size_t index = 0; 6387 bool match_inval; 6388 6389 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 6390 6391 if (not match_inval) // no pending inval in IVT 6392 { 6393 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6394 6395 #if DEBUG_MEMC_CLEANUP 6396 if (m_debug) 6397 { 6398 std::cout << " <MEMC " << name() 6399 << " CLEANUP_IVT_LOCK> Unexpected cleanup" 6400 << " with no corresponding IVT entry:" 6401 << " address = " << std::hex 6402 << (r_cleanup_nline.read() * 4 * m_words) 6403 << std::dec << std::endl; 6404 } 6405 #endif 6406 } 6407 else // pending inval in IVT 6408 { 6409 r_cleanup_write_srcid = m_ivt.srcid(index); 6410 r_cleanup_write_trdid = m_ivt.trdid(index); 6411 r_cleanup_write_pktid = m_ivt.pktid(index); 6412 r_cleanup_need_rsp = m_ivt.need_rsp(index); 6413 r_cleanup_need_ack = m_ivt.need_ack(index); 6414 r_cleanup_index = index; 6415 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 6416 6417 #if DEBUG_MEMC_CLEANUP 6418 if (m_debug) 6419 { 6420 std::cout << " <MEMC " << name() 6421 << " CLEANUP_IVT_LOCK> Cleanup matching pending" 6422 << " invalidate transaction on IVT:" 6423 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 6424 << " / ivt_entry = " << index << std::dec << std::endl; 6425 } 6426 #endif 6427 } 6428 break; 6429 } 6430 /////////////////////////// 6431 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 6432 // and test if last 6433 { 6434 assert((r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 6435 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 6436 6437 size_t count = 0; 6438 m_ivt.decrement(r_cleanup_index.read(), count); 6439 6440 if (count == 0) // multi inval transaction completed 6441 { 6442 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 6443 } 6444 else // multi inval transaction not completed 6445 { 6446 if (r_cleanup_ncc.read()) // need to put data to the XRAM 6447 { 6448 r_cleanup_fsm = CLEANUP_IXR_REQ; 6449 } 6450 else 6451 { 6452 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6453 } 6454 } 6455 6456 #if DEBUG_MEMC_CLEANUP 6457 if (m_debug) 6458 { 6459 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 6460 << " Decrement response counter in IVT:" 6461 << " IVT_index = " << r_cleanup_index.read() 6462 << " / rsp_count = " << count << std::endl; 6463 } 6464 #endif 6465 break; 6466 } 6467 /////////////////////// 6468 case CLEANUP_IVT_CLEAR: // Clear IVT entry 6469 // Acknowledge CONFIG FSM if required 6470 { 6471 assert((r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 6472 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 6473 6474 m_ivt.clear(r_cleanup_index.read()); 6475 6476 if (r_cleanup_need_ack.read()) 6477 { 6478 assert((r_config_rsp_lines.read() > 0) and 6479 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 6480 6481 config_rsp_lines_cleanup_decr = true; 6482 } 6483 6484 if (r_cleanup_need_rsp.read()) r_cleanup_fsm = CLEANUP_WRITE_RSP; 6485 else if (r_cleanup_ncc.read()) r_cleanup_fsm = CLEANUP_IXR_REQ; 6486 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 6487 6488 #if DEBUG_MEMC_CLEANUP 6489 if (m_debug) 6490 { 6491 std::cout << " <MEMC " << name() 6492 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 6493 << " IVT_index = " << r_cleanup_index.read() << std::endl; 6494 } 6495 #endif 6496 break; 6497 } 6498 /////////////////////// 6499 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 6500 // wait if pending request to the TGT_RSP FSM 6501 { 6502 if (r_cleanup_to_tgt_rsp_req.read()) 
break; 6503 6504 assert((r_cleanup_ncc.read() == false) and 6505 "CLEANUP_WRITE_RSP : Cleanup on NCC line invalid in " 6506 "MEM_CACHE with write_rsp needed. STRANGE BEHAVIOUR"); 6507 6508 // no pending request 6509 r_cleanup_to_tgt_rsp_req = true; 6510 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 6511 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 6512 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 6513 r_cleanup_to_tgt_rsp_type = true; 6514 6515 if (r_cleanup_ncc.read()) // need to put data to the XRAM 6516 { 6517 r_cleanup_fsm = CLEANUP_IXR_REQ; 6518 } 6519 else 6520 { 6521 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6522 } 6523 6524 #if DEBUG_MEMC_CLEANUP 6525 if (m_debug) 6526 { 6527 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 6528 << " Send a response to a previous write request: " 6529 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 6530 << " / rtrdid = " << r_cleanup_write_trdid.read() 6531 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 6532 } 6533 #endif 6534 break; 6535 } 6536 ///////////////////// 6537 case CLEANUP_IXR_REQ: 6538 { 6539 // Send a request to the ixr to write the data in the XRAM using 6540 // the prereserved TRT entry 6541 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CLEANUP) 6542 { 6543 if (not r_cleanup_to_ixr_cmd_req.read()) 6544 { 6545 size_t index = 0; 6546 bool hit = m_trt.hit_write(r_cleanup_nline.read(), &index); 6547 6548 assert(hit and "CLEANUP_IXR_REQ found no matching entry in TRT"); 6549 6550 r_cleanup_to_ixr_cmd_req = true; 6551 6552 if (r_cleanup_contains_data.read()) 6553 { 6554 std::vector<data_t> data_vector; 6555 data_vector.clear(); 6556 6557 for (size_t i = 0; i < m_words; i++) 6558 { 6559 data_vector.push_back(r_cleanup_data[i]); 6560 } 6561 6562 m_trt.set(index, 6563 false, // write to XRAM 6564 r_cleanup_nline.read(), // line index 6565 0, 6566 0, 6567 0, 6568 false, 6569 0, 6570 0, 6571 std::vector<be_t> (m_words, 0), 6572 data_vector); 5688 6573 } 5689 else { 5690 m_cpt_cleanup_remote++; 5691 m_cpt_cleanup_data_remote++; 5692 } 5693 // 2 + m_words flits for cleanup with data 5694 m_cpt_cleanup_cost += (m_words + 2) * req_distance(srcid); 5695 m_cpt_cleanup_data_cost += (m_words + 2) * req_distance(srcid); 5696 // </Activity Counters> 5697 } 5698 else 5699 { 5700 r_cleanup_fsm = CLEANUP_DIR_REQ; 5701 // <Activity Counters> 5702 if (is_local_req(srcid)) { 5703 m_cpt_cleanup_local++; 5704 } 5705 else { 5706 m_cpt_cleanup_remote++; 5707 } 5708 // 2 flits for cleanup without data 5709 m_cpt_cleanup_cost += 2 * req_distance(srcid); 5710 // </Activity Counters> 5711 } 5712 cc_receive_to_cleanup_fifo_get = true; 5713 r_cleanup_nline = nline; 5714 6574 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 6575 r_cleanup_to_ixr_cmd_index = index; 6576 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 6577 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 6578 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5715 6579 #if DEBUG_MEMC_CLEANUP 5716 if(m_debug) 5717 { 5718 std::cout 5719 << " <MEMC " << name() 5720 << " CLEANUP_GET_NLINE> Cleanup request:" 5721 << std::hex 5722 << " / address = " << nline * m_words * 4 5723 << " / contains data = " << (!eop) 5724 << std::endl; 5725 } 5726 #endif 5727 break; 5728 } 5729 ///////////////////// 5730 case CLEANUP_GET_DATA : 5731 { 5732 if (m_cc_receive_to_cleanup_fifo.rok()) 5733 { 5734 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 5735 5736 uint32_t data = 5737 DspinRwtParam::dspin_get (flit, DspinRwtParam::CLEANUP_DATA_UPDT); 5738 5739 
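// A minimal sketch of the write-back started in CLEANUP_IXR_REQ for a dirty
// NCC line: the cleanup reuses a pre-reserved TRT entry, fills it with the
// data carried by the cleanup flits, and hands it to the IXR_CMD FSM. The Trt
// interface below is only a stand-in, not the real TransactionTab class.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Trt      // illustrative subset of the transaction table
{
    virtual bool hit_write(uint64_t nline, std::size_t* index) = 0;
    virtual void set_put(std::size_t index, uint64_t nline,
                         const std::vector<uint32_t>& data) = 0;
    virtual ~Trt() = default;
};

// Returns true when the request could be posted; returns false when the
// previous cleanup-to-IXR request is still pending, in which case the FSM
// loops through CLEANUP_WAIT and retries, as in the code above.
bool cleanup_ixr_req(Trt& trt, bool ixr_req_pending, uint64_t nline,
                     const std::vector<uint32_t>& cleanup_data,
                     std::size_t& trt_index)
{
    if (ixr_req_pending) return false;

    bool hit = trt.hit_write(nline, &trt_index);
    assert(hit && "CLEANUP_IXR_REQ found no matching entry in TRT");

    if (not cleanup_data.empty())          // the L1 copy was dirty
        trt.set_put(trt_index, nline, cleanup_data);

    return true;                           // IXR_CMD will issue the XRAM PUT
}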
r_cleanup_data[r_cleanup_data_index] = data; 5740 r_cleanup_data_index = r_cleanup_data_index.read() + 1; 5741 assert (r_cleanup_data_index.read() < m_words and "MEM_CACHE in CLEANUP_GET_DATA : too much flits in cleanup data updt"); 5742 cc_receive_to_cleanup_fifo_get = true; 5743 if (r_cleanup_data_index.read() == m_words - 1) 6580 if (m_debug) 5744 6581 { 5745 r_cleanup_contains_data = true; 5746 r_cleanup_fsm = CLEANUP_DIR_REQ; 5747 } 5748 #if DEBUG_MEMC_CLEANUP 5749 if(m_debug) 5750 { 5751 std::cout 5752 << " <MEMC " << name() 5753 << " CLEANUP_GET_DATA> " 5754 << " / word = " << std::dec << r_cleanup_data_index.read() 5755 << " / data = " << std::hex << data 6582 std::cout << " <MEMC " << name() 6583 << " CLEANUP_IXR_REQ>" 6584 << " request send to IXR_CMD" 5756 6585 << std::endl; 5757 6586 } 5758 6587 #endif 5759 6588 } 5760 break; 5761 } 5762 ///////////////////// 5763 case CLEANUP_DIR_REQ: // Get the lock to the directory 5764 { 5765 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 5766 5767 r_cleanup_fsm = CLEANUP_DIR_LOCK; 5768 6589 else 6590 { 6591 r_cleanup_fsm = CLEANUP_WAIT; 5769 6592 #if DEBUG_MEMC_CLEANUP 5770 if(m_debug) 5771 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 5772 #endif 5773 break; 5774 } 5775 5776 ////////////////////// 5777 case CLEANUP_DIR_LOCK: 5778 { 5779 // test directory status 5780 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 5781 { 5782 std::cout 5783 << "VCI_MEM_CACHE ERROR " << name() 5784 << " CLEANUP_DIR_LOCK state" 5785 << " bad DIR allocation" << std::endl; 5786 5787 exit(0); 5788 } 5789 5790 // Read the directory 5791 size_t way = 0; 5792 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 5793 5794 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 5795 r_cleanup_is_cnt = entry.is_cnt; 5796 r_cleanup_dirty = entry.dirty; 5797 r_cleanup_tag = entry.tag; 5798 r_cleanup_lock = entry.lock; 5799 r_cleanup_way = way; 5800 r_cleanup_count = entry.count; 5801 r_cleanup_ptr = entry.ptr; 5802 r_cleanup_copy = entry.owner.srcid; 5803 r_cleanup_copy_inst = entry.owner.inst; 5804 5805 //RWT 5806 size_t set = m_y[(addr_t)(cleanup_address)]; 5807 m_cache_data.read_line(way, set, r_cleanup_old_data); 5808 r_cleanup_coherent = entry.cache_coherent; 5809 5810 if(entry.valid) // hit : the copy must be cleared 5811 { 5812 assert( 5813 (entry.count > 0) and 5814 "VCI MEM CACHE ERROR: " 5815 "In CLEANUP_DIR_LOCK, CLEANUP command on a valid entry " 5816 "with no copies"); 5817 5818 // no access to the heap 5819 if((entry.count == 1) or (entry.is_cnt)) 6593 if (m_debug) 5820 6594 { 5821 r_cleanup_fsm = CLEANUP_DIR_WRITE; 5822 } 5823 // access to the heap 5824 else 5825 { 5826 r_cleanup_fsm = CLEANUP_HEAP_REQ; 5827 } 5828 } 5829 else // miss : check IVT for a pending invalidation transaction 5830 { 5831 r_cleanup_fsm = CLEANUP_IVT_LOCK; 5832 } 5833 5834 #if DEBUG_MEMC_CLEANUP 5835 if(m_debug) 5836 { 5837 std::cout 5838 << " <MEMC " << name() 5839 << " CLEANUP_DIR_LOCK> Test directory status: " 5840 << std::hex 5841 << " line = " << cleanup_address 5842 << " / hit = " << entry.valid 5843 << " / dir_id = " << entry.owner.srcid 5844 << " / dir_ins = " << entry.owner.inst 5845 << " / search_id = " << r_cleanup_srcid.read() 5846 << " / search_ins = " << r_cleanup_inst.read() 5847 << " / count = " << entry.count 5848 << " / is_cnt = " << entry.is_cnt 5849 << std::endl; 5850 } 5851 #endif 5852 break; 5853 } 5854 5855 /////////////////////// 5856 case CLEANUP_DIR_WRITE: 5857 { 5858 // Update 
the directory entry without heap access 5859 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 5860 { 5861 std::cout 5862 << "VCI_MEM_CACHE ERROR " << name() 5863 << " CLEANUP_DIR_WRITE state" 5864 << " bad DIR allocation" << std::endl; 5865 5866 exit(0); 5867 } 5868 5869 size_t way = r_cleanup_way.read(); 5870 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 5871 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 5872 5873 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 5874 bool match = match_srcid and match_inst; 5875 5876 if(not r_cleanup_is_cnt.read() and not match) 5877 { 5878 std::cout 5879 << "VCI_MEM_CACHE ERROR : Cleanup request on a valid" 5880 << "entry using linked list mode with no corresponding" 5881 << "directory or heap entry" 5882 << std::endl; 5883 5884 exit(1); 5885 } 5886 5887 /*RWT*/ 5888 bool inval_request = (r_read_to_cleanup_req.read() and (r_cleanup_nline.read() == r_read_to_cleanup_nline.read())) // NCC to CC initiated by a read transaction 5889 or (r_write_to_cleanup_req.read() and (r_cleanup_nline.read() == r_write_to_cleanup_nline.read())); // NCC to CC initiated by a write transaction 5890 5891 5892 5893 if (r_write_to_cleanup_req.read() and (r_cleanup_nline.read() == r_write_to_cleanup_nline.read())) 5894 { 5895 r_write_to_cleanup_req = false; 5896 m_cpt_ncc_to_cc_write ++; 5897 } 5898 5899 5900 // update the cache directory (for the copies) 5901 DirectoryEntry entry; 5902 entry.valid = true; 5903 entry.cache_coherent = inval_request or r_cleanup_coherent.read(); 5904 entry.is_cnt = r_cleanup_is_cnt.read(); 5905 entry.dirty = r_cleanup_dirty.read() or r_cleanup_contains_data.read(); 5906 entry.tag = r_cleanup_tag.read(); 5907 entry.lock = r_cleanup_lock.read(); 5908 entry.ptr = r_cleanup_ptr.read(); 5909 if (r_read_to_cleanup_req.read() and (r_cleanup_nline.read() == r_read_to_cleanup_nline.read())) //pending READ 5910 { 5911 if (r_read_to_cleanup_cached_read.read()) 5912 { 5913 entry.count = r_cleanup_count.read(); 5914 entry.owner.srcid = r_read_to_cleanup_srcid.read(); 5915 entry.owner.inst = r_read_to_cleanup_inst.read(); 5916 } 5917 else 5918 { 5919 entry.count = r_cleanup_count.read() - 1; 5920 entry.owner.srcid = r_cleanup_copy.read(); 5921 entry.owner.inst = r_cleanup_copy_inst.read(); 5922 } 5923 if (r_read_to_cleanup_is_ll.read()) 5924 { 5925 r_cleanup_to_tgt_rsp_ll_key = r_read_to_cleanup_ll_key.read(); 5926 } 5927 } 5928 else 5929 { 5930 entry.count = r_cleanup_count.read() - 1; 5931 entry.owner.srcid = 0; 5932 entry.owner.inst = 0; 5933 5934 #if REVERT_CC_MECANISM 5935 // Revert CC to NCC if : 5936 // - no more copy in L1 caches 5937 // - this line is not in counter mode (broadcast) 5938 // - this line is not in NCC to CC mecanism 5939 if (((r_cleanup_count.read() - 1) == 0) and (r_cleanup_is_cnt == false) and (inval_request == false)) 5940 { 5941 entry.cache_coherent = false; 5942 } 5943 #endif 5944 5945 #if REVERT_BC_MECANISM 5946 if ((r_cleanup_count.read() - 1) == 0) 5947 { 5948 entry.is_cnt = false; 5949 } 5950 #endif 5951 5952 } 5953 5954 if (r_cleanup_contains_data.read()) 5955 { 5956 for (size_t word = 0; word < m_words; word ++) 5957 { 5958 m_cache_data.write(way, set, word, r_cleanup_data[word].read(), 0xF); 5959 } 5960 addr_t min = r_cleanup_nline.read()*m_words*4 ; 5961 addr_t max = r_cleanup_nline.read()*m_words*4 + (m_words - 1)*4; 5962 m_llsc_table.sw(min, max); 5963 } 5964 5965 m_cache_directory.write(set, way, entry); 5966 5967 /*RWT*/ 5968 if (inval_request) 5969 { 5970 
r_cleanup_fsm = CLEANUP_IVT_LOCK_DATA; 5971 } 5972 else 5973 { 5974 r_cleanup_fsm = CLEANUP_SEND_CLACK; 5975 } 5976 5977 #if DEBUG_MEMC_CLEANUP 5978 if(m_debug) 5979 { 5980 std::cout 5981 << " <MEMC " << name() 5982 << " CLEANUP_DIR_WRITE> Update directory:" 5983 << std::hex 5984 << " address = " << r_cleanup_nline.read() * m_words * 4 5985 << " / dir_id = " << entry.owner.srcid 5986 << " / dir_ins = " << entry.owner.inst 5987 << " / count = " << entry.count 5988 << " / is_cnt = " << entry.is_cnt 5989 << " / match_inval = " << inval_request 5990 << " / is_coherent = " << entry.cache_coherent 5991 << std::dec 5992 << std::endl; 5993 } 5994 #endif 5995 5996 break; 5997 } 5998 ///////////////////// 5999 case CLEANUP_IVT_LOCK_DATA://RWT 6000 { 6001 //Search for a matching inval in the IVT (there must be one) and check if there is a pending read. 6002 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) 6003 { 6004 size_t index = 0; 6005 bool match_inval; 6006 6007 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 6008 assert (match_inval && "VCI MEM CACHE ERROR: In CLEANUP_IVT_LOCK_DATA, NO CORRESPONDING INVAL"); 6009 r_cleanup_read_srcid = m_ivt.srcid(index); 6010 r_cleanup_read_trdid = m_ivt.trdid(index); 6011 r_cleanup_read_pktid = 0x0 + m_ivt.pktid(index); 6012 r_cleanup_read_need_rsp = !m_ivt.need_rsp(index); 6013 r_cleanup_index = index; 6014 6015 r_cleanup_fsm = CLEANUP_IVT_CLEAR_DATA; 6016 } 6017 #if DEBUG_MEMC_CLEANUP 6018 if (m_debug) 6019 { 6020 std::cout 6021 << " <MEMC " << name() 6022 << " CLEANUP_IVT_LOCK_DATA> fetch pending inval" 6023 << std::endl; 6024 } 6025 #endif 6026 break; 6027 } 6028 6029 ////////////////////////// 6030 case CLEANUP_IVT_CLEAR_DATA://RWT 6031 { 6032 m_ivt.clear(r_cleanup_index.read()); 6033 assert ((r_cleanup_read_need_rsp.read() == (r_read_to_cleanup_req.read() && (r_cleanup_nline.read() == r_read_to_cleanup_nline.read()))) && "condition pending read"); 6034 if (r_cleanup_read_need_rsp.read()) 6035 { 6036 r_cleanup_fsm = CLEANUP_READ_RSP; 6037 } 6038 else 6039 { 6040 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6041 } 6042 #if DEBUG_MEMC_CLEANUP 6043 if (m_debug) 6044 { 6045 std::cout 6046 << " <MEMC " << name() 6047 << " CLEANUP_IVT_CLEAR_DATA> clear IVT entry" 6048 << std::endl; 6049 } 6050 #endif 6051 break; 6052 } 6053 6054 //////////////////////// 6055 case CLEANUP_READ_RSP://RWT 6056 { 6057 if(r_cleanup_to_tgt_rsp_req.read()) break; 6058 6059 r_cleanup_to_tgt_rsp_req = true; 6060 r_cleanup_to_tgt_rsp_srcid = r_cleanup_read_srcid.read(); 6061 r_cleanup_to_tgt_rsp_trdid = r_cleanup_read_trdid.read(); 6062 r_cleanup_to_tgt_rsp_pktid = 0x0 + r_cleanup_read_pktid.read();//RWT 6063 r_cleanup_to_tgt_rsp_type = 0; //Read instruction 6064 r_cleanup_to_tgt_rsp_length = r_read_to_cleanup_length.read(); 6065 r_cleanup_to_tgt_rsp_first_word = r_read_to_cleanup_first_word.read(); 6066 r_read_to_cleanup_req = false; 6067 m_cpt_ncc_to_cc_read ++; 6068 if (r_cleanup_contains_data.read()) //L1 was dirty 6069 { 6070 for(size_t i = 0; i<m_words; i++) 6071 { 6072 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_data[i].read(); 6073 } 6074 } 6075 else //the L2 data are up to date 6076 { 6077 for(size_t i = 0; i<m_words; i++) 6078 { 6079 r_cleanup_to_tgt_rsp_data[i] = r_cleanup_old_data[i].read(); 6080 } 6081 } 6082 6083 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6084 6085 #if DEBUG_MEMC_CLEANUP 6086 if (m_debug) 6087 { 6088 std::cout 6089 << " <MEMC " << name() 6090 << " CLEANUP_READ_RSP> answer READ" 6091 << std::endl; 6092 } 6093 #endif 6094 break; 6095 } 6096 
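// A small sketch of the data selection made in CLEANUP_READ_RSP when a READ
// was waiting for this NCC->CC cleanup: if the evicting L1 copy was dirty the
// response uses the data carried by the cleanup, otherwise the line already
// read from the L2 data array at CLEANUP_DIR_LOCK time (r_cleanup_old_data)
// is up to date. Names below are illustrative.
#include <cstdint>
#include <vector>

std::vector<uint32_t> cleanup_read_rsp_data(
    bool l1_copy_was_dirty,
    const std::vector<uint32_t>& cleanup_data,   // data flits of the cleanup
    const std::vector<uint32_t>& l2_line_data)   // line read from the L2 array
{
    return l1_copy_was_dirty ? cleanup_data : l2_line_data;
}
// The response metadata (srcid, trdid, pktid, length, first word) comes from
// the IVT entry and the pending read registers, as shown in the state above.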
////////////////////// 6097 case CLEANUP_HEAP_REQ: 6098 { 6099 // get the lock to the HEAP directory 6100 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 6101 6102 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 6103 6104 #if DEBUG_MEMC_CLEANUP 6105 if(m_debug) 6106 { 6107 std::cout 6108 << " <MEMC " << name() 6109 << " CLEANUP_HEAP_REQ> HEAP lock acquired " 6110 << std::endl; 6111 } 6112 #endif 6113 break; 6114 } 6115 6116 ////////////////////// 6117 case CLEANUP_HEAP_LOCK: 6118 { 6119 // two cases are handled in this state : 6120 // 1. the matching copy is directly in the directory 6121 // 2. the matching copy is the first copy in the heap 6122 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6123 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6124 6125 size_t way = r_cleanup_way.read(); 6126 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 6127 6128 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 6129 bool last = (heap_entry.next == r_cleanup_ptr.read()); 6130 6131 // match_dir computation 6132 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 6133 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 6134 bool match_dir = match_dir_srcid and match_dir_inst; 6135 6136 // match_heap computation 6137 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 6138 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 6139 bool match_heap = match_heap_srcid and match_heap_inst; 6140 6141 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 6142 r_cleanup_prev_srcid = heap_entry.owner.srcid; 6143 r_cleanup_prev_inst = heap_entry.owner.inst; 6144 6145 assert( (not last or match_dir or match_heap) and 6146 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 6147 6148 assert( (not match_dir or not match_heap) and 6149 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 6150 6151 DirectoryEntry dir_entry; 6152 dir_entry.valid = true; 6153 dir_entry.cache_coherent = true; 6154 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 6155 dir_entry.dirty = r_cleanup_dirty.read(); 6156 dir_entry.tag = r_cleanup_tag.read(); 6157 dir_entry.lock = r_cleanup_lock.read(); 6158 dir_entry.count = r_cleanup_count.read()-1; 6159 6160 // the matching copy is registered in the directory and 6161 // it must be replaced by the first copy registered in 6162 // the heap. 
The corresponding entry must be freed 6163 if(match_dir) 6164 { 6165 dir_entry.ptr = heap_entry.next; 6166 dir_entry.owner.srcid = heap_entry.owner.srcid; 6167 dir_entry.owner.inst = heap_entry.owner.inst; 6168 r_cleanup_next_ptr = r_cleanup_ptr.read(); 6169 r_cleanup_fsm = CLEANUP_HEAP_FREE; 6170 } 6171 6172 // the matching copy is the first copy in the heap 6173 // It must be freed and the copy registered in directory 6174 // must point to the next copy in heap 6175 else if(match_heap) 6176 { 6177 dir_entry.ptr = heap_entry.next; 6178 dir_entry.owner.srcid = r_cleanup_copy.read(); 6179 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 6180 r_cleanup_next_ptr = r_cleanup_ptr.read(); 6181 r_cleanup_fsm = CLEANUP_HEAP_FREE; 6182 } 6183 6184 // The matching copy is in the heap, but is not the first copy 6185 // The directory entry must be modified to decrement count 6186 else 6187 { 6188 dir_entry.ptr = r_cleanup_ptr.read(); 6189 dir_entry.owner.srcid = r_cleanup_copy.read(); 6190 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 6191 r_cleanup_next_ptr = heap_entry.next; 6192 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 6193 } 6194 6195 m_cache_directory.write(set,way,dir_entry); 6196 6197 #if DEBUG_MEMC_CLEANUP 6198 if(m_debug) 6199 { 6200 std::cout 6201 << " <MEMC " << name() 6202 << " CLEANUP_HEAP_LOCK> Checks matching:" 6203 << " address = " << r_cleanup_nline.read() * m_words * 4 6204 << " / dir_id = " << r_cleanup_copy.read() 6205 << " / dir_ins = " << r_cleanup_copy_inst.read() 6206 << " / heap_id = " << heap_entry.owner.srcid 6207 << " / heap_ins = " << heap_entry.owner.inst 6208 << " / search_id = " << r_cleanup_srcid.read() 6209 << " / search_ins = " << r_cleanup_inst.read() 6210 << std::endl; 6211 } 6212 #endif 6213 break; 6214 } 6215 6216 //////////////////////// 6217 case CLEANUP_HEAP_SEARCH: 6218 { 6219 // This state is handling the case where the copy 6220 // is in the heap, but is not the first in the linked list 6221 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6222 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6223 6224 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 6225 6226 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 6227 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 6228 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 6229 bool match_heap = match_heap_srcid and match_heap_inst; 6230 6231 assert( (not last or match_heap) and 6232 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 6233 6234 // the matching copy must be removed 6235 if(match_heap) 6236 { 6237 // re-use ressources 6238 r_cleanup_ptr = heap_entry.next; 6239 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 6240 } 6241 // test the next in the linked list 6242 else 6243 { 6244 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 6245 r_cleanup_prev_srcid = heap_entry.owner.srcid; 6246 r_cleanup_prev_inst = heap_entry.owner.inst; 6247 r_cleanup_next_ptr = heap_entry.next; 6248 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 6249 } 6250 6251 #if DEBUG_MEMC_CLEANUP 6252 if(m_debug) 6253 { 6254 if(not match_heap) 6255 { 6256 std::cout 6257 << " <MEMC " << name() 6258 << " CLEANUP_HEAP_SEARCH> Matching copy not found, search next:" 6595 std::cout << " <MEMC " << name() 6596 << " CLEANUP_IXR_REQ>" 6597 << " waiting completion of previous request" 6259 6598 << std::endl; 6260 6599 } 6261 else 6262 { 6263 std::cout 6264 << " <MEMC " << name() 6265 << " CLEANUP_HEAP_SEARCH> Matching copy found:" 6266 << std::endl; 6267 } 6268 
6269 std::cout 6270 << " address = " << r_cleanup_nline.read() * m_words * 4 6271 << " / heap_id = " << heap_entry.owner.srcid 6272 << " / heap_ins = " << heap_entry.owner.inst 6273 << " / search_id = " << r_cleanup_srcid.read() 6274 << " / search_ins = " << r_cleanup_inst.read() 6275 << " / last = " << last 6276 << std::endl; 6277 } 6278 #endif 6279 break; 6280 } 6281 //////////////////////// 6282 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 6283 { 6284 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6285 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6286 6287 HeapEntry heap_entry; 6288 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 6289 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 6290 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 6291 6292 // this is the last entry of the list of copies 6293 if(last) 6294 { 6295 heap_entry.next = r_cleanup_prev_ptr.read(); 6296 } 6297 // this is not the last entry 6298 else 6299 { 6300 heap_entry.next = r_cleanup_ptr.read(); 6301 } 6302 6303 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 6304 6305 r_cleanup_fsm = CLEANUP_HEAP_FREE; 6600 #endif 6601 } 6602 } 6603 break; 6604 } 6605 6606 ///////////////////// 6607 case CLEANUP_WAIT : 6608 { 6609 r_cleanup_fsm = CLEANUP_IXR_REQ; 6610 break; 6611 } 6612 6613 //////////////////////// 6614 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 6615 // on the coherence CLACK network. 6616 { 6617 if (not p_dspin_clack.read) break; 6618 6619 r_cleanup_fsm = CLEANUP_IDLE; 6306 6620 6307 6621 #if DEBUG_MEMC_CLEANUP 6308 if(m_debug) 6309 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_SEARCH>" 6310 << " Remove the copy in the linked list" << std::endl; 6311 #endif 6312 break; 6313 } 6314 /////////////////////// 6315 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 6316 // and becomes the head of the list of free entries 6317 { 6318 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 6319 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 6320 HeapEntry heap_entry; 6321 heap_entry.owner.srcid = 0; 6322 heap_entry.owner.inst = false; 6323 6324 if(m_heap.is_full()) 6325 { 6326 heap_entry.next = r_cleanup_next_ptr.read(); 6327 } 6328 else 6329 { 6330 heap_entry.next = m_heap.next_free_ptr(); 6331 } 6332 6333 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 6334 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 6335 m_heap.unset_full(); 6336 6337 // <Activity counters> 6338 m_cpt_heap_slot_available++; 6339 // </Activity counters> 6340 6341 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6342 6343 #if DEBUG_MEMC_CLEANUP 6344 if(m_debug) 6345 std::cout << " <MEMC " << name() << " CLEANUP_HEAP_FREE>" 6346 << " Update the list of free entries" << std::endl; 6347 #endif 6348 break; 6349 } 6350 ////////////////////// 6351 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 6352 // invalidate transaction matching the cleanup 6353 { 6354 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 6355 6356 size_t index = 0; 6357 bool match_inval; 6358 6359 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 6360 if ( not match_inval ) // no pending inval 6361 { 6362 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6363 6364 #if DEBUG_MEMC_CLEANUP 6365 if(m_debug) 6366 std::cout << " <MEMC " << name() 6367 << " CLEANUP_IVT_LOCK> Unexpected cleanup" 6368 << " with no corresponding IVT entry:" 6369 << " address = " << std::hex 6370 << (r_cleanup_nline.read() *4*m_words) 
6371 << std::endl; 6372 #endif 6373 } 6374 else 6375 { 6376 // pending inval 6377 r_cleanup_write_srcid = m_ivt.srcid(index); 6378 r_cleanup_write_trdid = m_ivt.trdid(index); 6379 r_cleanup_write_pktid = m_ivt.pktid(index); 6380 r_cleanup_need_rsp = m_ivt.need_rsp(index); 6381 r_cleanup_need_ack = m_ivt.need_ack(index); 6382 r_cleanup_index = index; 6383 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 6384 #if DEBUG_MEMC_CLEANUP 6385 if(m_debug) 6386 std::cout << " <MEMC " << name() 6387 << " CLEANUP_IVT_LOCK> Cleanup matching pending" 6388 << " invalidate transaction on IVT:" 6389 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 6390 << " / ivt_entry = " << index << std::endl; 6391 #endif 6392 } 6393 break; 6394 } 6395 /////////////////////////// 6396 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 6397 { 6398 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 6399 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 6400 6401 size_t count = 0; 6402 m_ivt.decrement(r_cleanup_index.read(), count); 6403 6404 if(count == 0) // multi inval transaction completed 6405 { 6406 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 6407 } 6408 else // multi inval transaction not completed 6409 { 6410 if (r_cleanup_ncc.read()) //need to put data to the XRAM 6411 { 6412 r_cleanup_fsm = CLEANUP_IXR_REQ; 6413 } 6414 else 6415 { 6416 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6417 } 6418 } 6419 6420 #if DEBUG_MEMC_CLEANUP 6421 if(m_debug) 6422 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 6423 << " Decrement response counter in IVT:" 6424 << " IVT_index = " << r_cleanup_index.read() 6425 << " / rsp_count = " << count << std::endl; 6426 #endif 6427 break; 6428 } 6429 /////////////////////// 6430 case CLEANUP_IVT_CLEAR: // Clear IVT entry 6431 { 6432 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 6433 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 6434 6435 m_ivt.clear(r_cleanup_index.read()); 6436 6437 if ( r_cleanup_need_ack.read() ) 6438 { 6439 assert( (r_config_rsp_lines.read() > 0) and 6440 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 6441 6442 config_rsp_lines_cleanup_decr = true; 6443 } 6444 6445 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 6446 else if ( r_cleanup_ncc.read() ) r_cleanup_fsm = CLEANUP_IXR_REQ; 6447 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 6448 6449 #if DEBUG_MEMC_CLEANUP 6450 if(m_debug) 6451 std::cout << " <MEMC " << name() 6452 << " CLEANUP_IVT_CLEAR> Clear entry in IVT:" 6453 << " IVT_index = " << r_cleanup_index.read() << std::endl; 6454 #endif 6455 break; 6456 } 6457 /////////////////////// 6458 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 6459 // wait if pending request to the TGT_RSP FSM 6460 { 6461 if(r_cleanup_to_tgt_rsp_req.read()) break; 6462 6463 assert ( (r_cleanup_ncc.read() == false) and 6464 "CLEANUP_WRITE_RSP : Cleanup on NCC line invalid in " 6465 "MEM_CACHE with write_rsp needed. 
STRANGE BEHAVIOUR"); 6466 // no pending request 6467 r_cleanup_to_tgt_rsp_req = true; 6468 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 6469 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 6470 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 6471 r_cleanup_to_tgt_rsp_type = true; 6472 6473 if (r_cleanup_ncc.read() ) 6474 { 6475 r_cleanup_fsm = CLEANUP_IXR_REQ;//need to put data to the XRAM 6476 } 6477 else 6478 { 6479 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6480 } 6481 6482 #if DEBUG_MEMC_CLEANUP 6483 if(m_debug) 6484 std::cout << " <MEMC " << name() << " CLEANUP_WRITE_RSP>" 6485 << " Send a response to a previous write request: " 6486 << " rsrcid = " << std::hex << r_cleanup_write_srcid.read() 6487 << " / rtrdid = " << r_cleanup_write_trdid.read() 6488 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 6489 #endif 6490 break; 6491 } 6492 ///////////////////////// 6493 case CLEANUP_IXR_REQ: 6494 { 6495 //Send a request to the ixr to write the data in the XRAM using the prereserved TRT entry 6496 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CLEANUP) 6497 { 6498 if( not r_cleanup_to_ixr_cmd_req.read()) 6499 { 6500 size_t index = 0; 6501 bool hit = m_trt.hit_write(r_cleanup_nline.read(), &index); 6502 6503 assert (hit and "CLEANUP_IXR_REQ found no matching entry in TRT"); 6504 6505 r_cleanup_to_ixr_cmd_req = true; 6506 6507 if (r_cleanup_contains_data.read()) 6508 { 6509 std::vector<data_t> data_vector; 6510 data_vector.clear(); 6511 6512 for(size_t i=0; i<m_words; i++) 6513 { 6514 data_vector.push_back(r_cleanup_data[i]); 6515 } 6516 6517 m_trt.set(index, 6518 false, // write to XRAM 6519 r_cleanup_nline.read(), // line index 6520 0, 6521 0, 6522 0, 6523 false, 6524 0, 6525 0, 6526 std::vector<be_t> (m_words,0), 6527 data_vector); 6528 } 6529 r_cleanup_to_ixr_cmd_srcid = r_cleanup_srcid.read(); 6530 r_cleanup_to_ixr_cmd_index = index; 6531 r_cleanup_to_ixr_cmd_pktid = r_cleanup_pktid.read(); 6532 r_cleanup_to_ixr_cmd_nline = r_cleanup_nline.read(); 6533 //r_cleanup_to_ixr_cmd_l1_dirty_ncc = r_cleanup_contains_data.read(); 6534 r_cleanup_fsm = CLEANUP_SEND_CLACK; 6535 #if DEBUG_MEMC_CLEANUP 6536 if(m_debug) 6537 { 6538 std::cout 6539 << " <MEMC " << name() 6540 << " CLEANUP_IXR_REQ>" 6541 << " request send to IXR_CMD" 6542 << std::endl; 6543 } 6544 #endif 6545 } 6546 else 6547 { 6548 r_cleanup_fsm = CLEANUP_WAIT; 6549 #if DEBUG_MEMC_CLEANUP 6550 if(m_debug) 6551 { 6552 std::cout 6553 << " <MEMC " << name() 6554 << " CLEANUP_IXR_REQ>" 6555 << " waiting completion of previous request" 6556 << std::endl; 6557 } 6558 #endif 6559 } 6560 } 6561 break; 6562 } 6563 6564 ///////////////////// 6565 case CLEANUP_WAIT : 6566 { 6567 r_cleanup_fsm = CLEANUP_IXR_REQ; 6568 break; 6569 } 6570 6571 //////////////////////// 6572 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 6573 // on the coherence CLACK network. 
6574 { 6575 if(not p_dspin_clack.read) break; 6576 6577 r_cleanup_fsm = CLEANUP_IDLE; 6578 6579 #if DEBUG_MEMC_CLEANUP 6580 if(m_debug) 6581 std::cout << " <MEMC " << name() 6582 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 6583 << " nline = " << std::hex << r_cleanup_nline.read() 6584 << " / way = " << std::dec << r_cleanup_way.read() 6585 << " / srcid = " << std::dec << r_cleanup_srcid.read() 6586 << std::endl; 6587 #endif 6588 break; 6589 } 6622 if (m_debug) 6623 { 6624 std::cout << " <MEMC " << name() 6625 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 6626 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 6627 << " / way = " << std::dec << r_cleanup_way.read() 6628 << " / srcid = " << std::dec << r_cleanup_srcid.read() 6629 << std::endl; 6630 } 6631 #endif 6632 break; 6633 } 6590 6634 } // end switch cleanup fsm 6591 6635 … … 6593 6637 // CAS FSM 6594 6638 //////////////////////////////////////////////////////////////////////////////////// 6595 // The CAS FSM handles the CAS (Store Conditionnal) atomic commands, 6596 // that are handled as "compare-and-swap instructions. 6639 // The CAS FSM handles the CAS (Compare And Swap) atomic commands. 6597 6640 // 6598 6641 // This command contains two or four flits: 6599 6642 // - In case of 32 bits atomic access, the first flit contains the value read 6600 // by a previous LLinstruction, the second flit contains the value to be writen.6643 // by a previous READ instruction, the second flit contains the value to be writen. 6601 6644 // - In case of 64 bits atomic access, the 2 first flits contains the value read 6602 // by a previous LLinstruction, the 2 next flits contains the value to be writen.6645 // by a previous READ instruction, the 2 next flits contains the value to be writen. 6603 6646 // 6604 6647 // The target address is cachable. If it is replicated in other L1 caches … … 6607 6650 // It access the directory to check hit / miss. 6608 6651 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 6609 // If a read transaction to the XRAM for this line already exists,6610 // or if the transaction table is full, it goes to the WAIT state6611 // to release the locks and try again. When the GET transaction has been6612 // launched, it goes to the WAIT state and try again.6613 // The CAS request is not consumed in the FIFO until a HIT is obtained.6652 // If a read transaction to the XRAM for this line already exists, 6653 // or if the transaction table is full, it goes to the WAIT state 6654 // to release the locks and try again. When the GET transaction has been 6655 // launched, it goes to the WAIT state and try again. 6656 // The CAS request is not consumed in the FIFO until a HIT is obtained. 6614 6657 // - In case of hit... 
6615 6658 /////////////////////////////////////////////////////////////////////////////////// 6616 6659 6617 switch (r_cas_fsm.read())6660 switch (r_cas_fsm.read()) 6618 6661 { 6662 //////////// 6663 case CAS_IDLE: // fill the local rdata buffers 6664 { 6665 if (m_cmd_cas_addr_fifo.rok()) 6666 { 6667 6668 #if DEBUG_MEMC_CAS 6669 if (m_debug) 6670 { 6671 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 6672 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 6673 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 6674 << " wdata = " << m_cmd_cas_wdata_fifo.read() 6675 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 6676 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 6677 } 6678 #endif 6679 if (m_cmd_cas_eop_fifo.read()) 6680 { 6681 r_cas_fsm = CAS_DIR_REQ; 6682 } 6683 else // we keep the last word in the FIFO 6684 { 6685 cmd_cas_fifo_get = true; 6686 } 6687 6688 // We fill the two buffers 6689 if (r_cas_cpt.read() < 2) // 32 bits access 6690 { 6691 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 6692 } 6693 6694 if ((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 6695 { 6696 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6697 } 6698 6699 assert((r_cas_cpt.read() <= 3) and // no more than 4 flits... 6700 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 6701 6702 if (r_cas_cpt.read() == 2) 6703 { 6704 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6705 } 6706 6707 r_cas_cpt = r_cas_cpt.read() + 1; 6708 } 6709 break; 6710 } 6711 ///////////////// 6712 case CAS_DIR_REQ: 6713 { 6714 if (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 6715 { 6716 r_cas_fsm = CAS_DIR_LOCK; 6717 } 6718 6719 #if DEBUG_MEMC_CAS 6720 if (m_debug) 6721 { 6722 std::cout << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " << std::endl; 6723 } 6724 #endif 6725 break; 6726 } 6727 ///////////////// 6728 case CAS_DIR_LOCK: // Read the directory 6729 { 6730 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6731 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 6732 6733 size_t way = 0; 6734 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 6735 6736 r_cas_is_cnt = entry.is_cnt; 6737 r_cas_dirty = entry.dirty; 6738 r_cas_tag = entry.tag; 6739 r_cas_way = way; 6740 r_cas_copy = entry.owner.srcid; 6741 r_cas_copy_inst = entry.owner.inst; 6742 r_cas_ptr = entry.ptr; 6743 r_cas_count = entry.count; 6744 r_cas_coherent = entry.cache_coherent; 6745 6746 if (entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 6747 else r_cas_fsm = CAS_MISS_TRT_LOCK; 6748 6749 #if DEBUG_MEMC_CAS 6750 if (m_debug) 6751 { 6752 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 6753 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6754 << " / hit = " << std::dec << entry.valid 6755 << " / count = " << entry.count 6756 << " / is_cnt = " << entry.is_cnt << std::endl; 6757 } 6758 #endif 6759 6760 break; 6761 } 6762 ///////////////////// 6763 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 6764 // and check data change in cache 6765 { 6766 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6767 "MEMC ERROR in CAS_DIR_HIT_READ: Bad DIR allocation"); 6768 6769 size_t way = r_cas_way.read(); 6770 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6771 6772 // update directory (lock & dirty bits) 6773 DirectoryEntry entry; 6774 entry.valid = true; 6775 entry.is_cnt = r_cas_is_cnt.read(); 6776 entry.dirty = true; 6777 entry.lock = true; 6778 entry.tag = r_cas_tag.read(); 6779 entry.owner.srcid = 
r_cas_copy.read(); 6780 entry.owner.inst = r_cas_copy_inst.read(); 6781 entry.count = r_cas_count.read(); 6782 entry.ptr = r_cas_ptr.read(); 6783 entry.cache_coherent = r_cas_coherent.read(); 6784 6785 m_cache_directory.write(set, way, entry); 6786 6787 // Store data from cache in buffer to do the comparison in next state 6788 m_cache_data.read_line(way, set, r_cas_data); 6789 6790 r_cas_fsm = CAS_DIR_HIT_COMPARE; 6791 6792 #if DEBUG_MEMC_CAS 6793 if (m_debug) 6794 { 6795 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 6796 << " cache and store it in buffer" << std::endl; 6797 } 6798 #endif 6799 break; 6800 } 6801 //////////////////////// 6802 case CAS_DIR_HIT_COMPARE: 6803 { 6804 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6805 6806 // check data change 6807 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 6808 6809 if (r_cas_cpt.read() == 4) // 64 bits CAS 6810 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 6811 6812 // to avoid livelock, force the atomic access to fail pseudo-randomly 6813 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 6814 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 6815 6816 if (ok and not forced_fail) r_cas_fsm = CAS_DIR_HIT_WRITE; 6817 else r_cas_fsm = CAS_RSP_FAIL; 6818 6819 #if DEBUG_MEMC_CAS 6820 if (m_debug) 6821 { 6822 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare old and new data" 6823 << " / expected value = " << std::hex << r_cas_rdata[0].read() 6824 << " / actual value = " << std::hex << r_cas_data[word].read() 6825 << " / forced_fail = " << std::dec << forced_fail << std::endl; 6826 } 6827 #endif 6828 break; 6829 } 6830 ////////////////////// 6831 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 6832 // write data in cache if no CC request 6833 { 6834 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6835 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 6836 6837 // The CAS is a success => sw access to the llsc_global_table 6838 m_llsc_table.sw(m_cmd_cas_addr_fifo.read(), m_cmd_cas_addr_fifo.read()); 6839 6840 // test coherence request 6841 if (r_cas_count.read()) // replicated line 6842 { 6843 if (r_cas_is_cnt.read()) 6844 { 6845 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 6846 6847 #if DEBUG_MEMC_CAS 6848 if (m_debug) 6849 { 6850 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6851 << " Broacast Inval required" 6852 << " / copies = " << r_cas_count.read() << std::endl; 6853 } 6854 #endif 6855 } 6856 else if (not r_cas_to_cc_send_multi_req.read() and 6857 not r_cas_to_cc_send_brdcast_req.read()) 6858 { 6859 r_cas_fsm = CAS_UPT_LOCK; // multi update required 6860 6861 #if DEBUG_MEMC_CAS 6862 if (m_debug) 6863 { 6864 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6865 << " Multi Inval required" 6866 << " / copies = " << r_cas_count.read() << std::endl; 6867 } 6868 #endif 6869 } 6870 else 6871 { 6872 r_cas_fsm = CAS_WAIT; 6873 6874 #if DEBUG_MEMC_CAS 6875 if (m_debug) 6876 { 6877 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6878 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 6879 } 6880 #endif 6881 } 6882 } 6883 else // no copies 6884 { 6885 size_t way = r_cas_way.read(); 6886 size_t set = m_y[(addr_t) (m_cmd_cas_addr_fifo.read())]; 6887 size_t word = m_x[(addr_t) (m_cmd_cas_addr_fifo.read())]; 6888 6889 // cache update 6890 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6891 if (r_cas_cpt.read() == 4) 6892 { 6893 m_cache_data.write(way, set, 
word + 1, m_cmd_cas_wdata_fifo.read()); 6894 } 6895 6896 r_cas_fsm = CAS_RSP_SUCCESS; 6897 6898 #if DEBUG_MEMC_CAS 6899 if (m_debug) 6900 { 6901 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 6902 << " way = " << std::dec << way 6903 << " / set = " << set 6904 << " / word = " << word 6905 << " / value = " << r_cas_wdata.read() 6906 << " / count = " << r_cas_count.read() 6907 << " / global_llsc_table access" << std::endl; 6908 } 6909 #endif 6910 } 6911 break; 6912 } 6913 ///////////////// 6914 case CAS_UPT_LOCK: // try to register the transaction in UPT 6915 // and write data in cache if successful registration 6916 // releases locks to retry later if UPT full 6917 { 6918 if (r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 6919 { 6920 bool wok = false; 6921 size_t index = 0; 6922 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6923 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6924 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6925 addr_t nline = m_nline[(addr_t) (m_cmd_cas_addr_fifo.read())]; 6926 size_t nb_copies = r_cas_count.read(); 6927 6928 wok = m_upt.set(true, // it's an update transaction 6929 false, // it's not a broadcast 6930 true, // response required 6931 false, // no acknowledge required 6932 srcid, 6933 trdid, 6934 pktid, 6935 nline, 6936 nb_copies, 6937 index); 6938 if (wok) // coherence transaction registered in UPT 6939 { 6940 // cache update 6941 size_t way = r_cas_way.read(); 6942 size_t set = m_y[(addr_t) (m_cmd_cas_addr_fifo.read())]; 6943 size_t word = m_x[(addr_t) (m_cmd_cas_addr_fifo.read())]; 6944 6945 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6946 if (r_cas_cpt.read() == 4) 6947 { 6948 m_cache_data.write(way, set, word + 1, m_cmd_cas_wdata_fifo.read()); 6949 } 6950 6951 r_cas_upt_index = index; 6952 r_cas_fsm = CAS_UPT_HEAP_LOCK; 6953 } 6954 else // releases the locks protecting UPT and DIR UPT full 6955 { 6956 r_cas_fsm = CAS_WAIT; 6957 } 6958 6959 #if DEBUG_MEMC_CAS 6960 if (m_debug) 6961 { 6962 std::cout << " <MEMC " << name() 6963 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 6964 << " / wok = " << wok 6965 << " / address = " << std::hex << nline * m_words * 4 6966 << " / count = " << nb_copies << std::endl; 6967 } 6968 #endif 6969 } 6970 break; 6971 } 6619 6972 ///////////// 6620 case CAS_IDLE: // fill the local rdata buffers 6621 { 6622 if(m_cmd_cas_addr_fifo.rok()) 6623 { 6973 case CAS_WAIT: // release all locks and retry from beginning 6974 { 6624 6975 6625 6976 #if DEBUG_MEMC_CAS 6626 if(m_debug) 6977 if (m_debug) 6978 { 6979 std::cout << " <MEMC " << name() << " CAS_WAIT> Release all locks" << std::endl; 6980 } 6981 #endif 6982 r_cas_fsm = CAS_DIR_REQ; 6983 break; 6984 } 6985 ////////////////////// 6986 case CAS_UPT_HEAP_LOCK: // lock the heap 6987 { 6988 if (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 6989 { 6990 6991 #if DEBUG_MEMC_CAS 6992 if (m_debug) 6993 { 6994 std::cout << " <MEMC " << name() 6995 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 6996 } 6997 #endif 6998 r_cas_fsm = CAS_UPT_REQ; 6999 } 7000 break; 7001 } 7002 //////////////// 7003 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 7004 { 7005 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 7006 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 7007 7008 if (!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 7009 { 7010 r_cas_to_cc_send_brdcast_req = false; 7011 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 7012 r_cas_to_cc_send_nline = m_nline[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7013 
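The m_upt.set() call above, and the m_ivt calls used earlier by the CLEANUP FSM, share one bookkeeping pattern: an entry is allocated with the number of expected responses, each response decrements the counter, and the entry is cleared when the counter reaches zero; when the table is full, the FSM releases its locks and retries (CAS_UPT_LOCK / CAS_WAIT). Below is a minimal, hypothetical C++ model of that pattern; the real UPT/IVT entries also carry srcid / trdid / pktid and the update/broadcast/response/ack flags passed to set() above.

#include <cstdint>
#include <cstddef>
#include <vector>

// Hypothetical model of an update/invalidate table (UPT / IVT):
// one entry per in-flight multi-update or multi-inval transaction.
struct PendingEntry
{
    bool          valid;
    std::uint64_t nline;   // cache-line index of the transaction
    std::size_t   count;   // responses still expected
};

class PendingTable
{
public:
    explicit PendingTable(std::size_t size) : m_tab(size, PendingEntry{false, 0, 0}) {}

    // Register a transaction; returns false when the table is full.
    bool set(std::uint64_t nline, std::size_t copies, std::size_t& index)
    {
        for (std::size_t i = 0; i < m_tab.size(); i++)
        {
            if (m_tab[i].valid) continue;
            m_tab[i].valid = true;
            m_tab[i].nline = nline;
            m_tab[i].count = copies;
            index = i;
            return true;
        }
        return false;
    }

    // One response received (CLEANUP_IVT_DECREMENT): decrement and report the
    // remaining count; the caller clears the entry when it reaches zero.
    void decrement(std::size_t index, std::size_t& remaining)
    {
        remaining = --m_tab[index].count;
    }

    void clear(std::size_t index) { m_tab[index].valid = false; }   // CLEANUP_IVT_CLEAR

private:
    std::vector<PendingEntry> m_tab;
};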
r_cas_to_cc_send_index = m_x[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7014 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 7015 7016 if (r_cas_cpt.read() == 4) 7017 { 7018 r_cas_to_cc_send_is_long = true; 7019 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 7020 } 7021 else 7022 { 7023 r_cas_to_cc_send_is_long = false; 7024 r_cas_to_cc_send_wdata_high = 0; 7025 } 7026 7027 // We put the first copy in the fifo 7028 cas_to_cc_send_fifo_put = true; 7029 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 7030 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 7031 if (r_cas_count.read() == 1) // one single copy 7032 { 7033 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 7034 // update responses 7035 cmd_cas_fifo_get = true; 7036 r_cas_to_cc_send_multi_req = true; 7037 r_cas_cpt = 0; 7038 } 7039 else // several copies 7040 { 7041 r_cas_fsm = CAS_UPT_NEXT; 7042 } 7043 7044 #if DEBUG_MEMC_CAS 7045 if (m_debug) 7046 { 7047 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 7048 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 7049 << " / wdata = " << std::hex << r_cas_wdata.read() 7050 << " / srcid = " << std::dec << r_cas_copy.read() 7051 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 7052 } 7053 #endif 7054 } 7055 break; 7056 } 7057 ///////////////// 7058 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 7059 { 7060 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 7061 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 7062 7063 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 7064 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 7065 cas_to_cc_send_fifo_inst = entry.owner.inst; 7066 cas_to_cc_send_fifo_put = true; 7067 7068 if (m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 7069 { 7070 r_cas_ptr = entry.next; 7071 if (entry.next == r_cas_ptr.read()) // last copy 7072 { 7073 r_cas_to_cc_send_multi_req = true; 7074 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 7075 // all update responses 7076 cmd_cas_fifo_get = true; 7077 r_cas_cpt = 0; 7078 } 7079 } 7080 7081 #if DEBUG_MEMC_CAS 7082 if (m_debug) 7083 { 7084 std::cout << " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 7085 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 7086 << " / wdata = " << std::hex << r_cas_wdata.read() 7087 << " / srcid = " << std::dec << entry.owner.srcid 7088 << " / inst = " << std::dec << entry.owner.inst << std::endl; 7089 } 7090 #endif 7091 break; 7092 } 7093 ///////////////////// 7094 case CAS_BC_TRT_LOCK: // get TRT lock to check TRT not full 7095 { 7096 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 7097 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 7098 7099 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 7100 { 7101 size_t wok_index = 0; 7102 bool wok = !m_trt.full(wok_index); 7103 if (wok) 7104 { 7105 r_cas_trt_index = wok_index; 7106 r_cas_fsm = CAS_BC_IVT_LOCK; 7107 } 7108 else 7109 { 7110 r_cas_fsm = CAS_WAIT; 7111 } 7112 7113 #if DEBUG_MEMC_CAS 7114 if (m_debug) 7115 { 7116 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 7117 << " : wok = " << wok << " / index = " << wok_index << std::endl; 7118 } 7119 #endif 7120 } 7121 break; 7122 } 7123 ///////////////////// 7124 case CAS_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 7125 { 7126 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 7127 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas DIR allocation"); 7128 7129 
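CAS_UPT_REQ and CAS_UPT_NEXT above then post one update request per copy: first the copy registered in the directory, then the heap list, stopping at the slot whose next field designates itself (the same tail convention the CLEANUP heap states rely on). The sketch below re-declares the illustrative Owner / HeapSlot shape so it stands alone; the RTL performs this walk one heap read per cycle through the cas_to_cc_send FIFO rather than in a software loop.

#include <cstddef>
#include <functional>
#include <vector>

struct Owner    { std::size_t srcid; bool inst; };
struct HeapSlot { Owner owner; std::size_t next; };   // next == own index: last copy

// Post one coherence update per copy of the line (illustrative helper only).
// 'count' is the directory copy counter, 'ptr' the head of the heap list.
void post_updates(const Owner& dir_copy, std::size_t ptr, std::size_t count,
                  const std::vector<HeapSlot>& heap,
                  const std::function<void(const Owner&)>& send)
{
    send(dir_copy);                               // CAS_UPT_REQ: copy from the directory
    std::size_t cur = ptr;
    for (std::size_t i = 1; i < count; i++)       // CAS_UPT_NEXT: remaining copies
    {
        send(heap[cur].owner);
        if (heap[cur].next == cur) break;         // self-pointing next: last copy
        cur = heap[cur].next;
    }
}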
assert((r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 7130 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas TRT allocation"); 7131 7132 if (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) 7133 { 7134 // register broadcast inval transaction in IVT 7135 bool wok = false; 7136 size_t index = 0; 7137 size_t srcid = m_cmd_cas_srcid_fifo.read(); 7138 size_t trdid = m_cmd_cas_trdid_fifo.read(); 7139 size_t pktid = m_cmd_cas_pktid_fifo.read(); 7140 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7141 size_t nb_copies = r_cas_count.read(); 7142 7143 // register a broadcast inval transaction in IVT 7144 wok = m_ivt.set(false, // it's an inval transaction 7145 true, // it's a broadcast 7146 true, // response required 7147 false, // no acknowledge required 7148 srcid, 7149 trdid, 7150 pktid, 7151 nline, 7152 nb_copies, 7153 index); 7154 7155 if (wok) // IVT not full 7156 { 7157 // cache update 7158 size_t way = r_cas_way.read(); 7159 size_t set = m_y[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7160 size_t word = m_x[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7161 7162 m_cache_data.write(way, set, word, r_cas_wdata.read()); 7163 if (r_cas_cpt.read() == 4) 6627 7164 { 6628 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 6629 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 6630 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 6631 << " wdata = " << m_cmd_cas_wdata_fifo.read() 6632 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 6633 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 7165 m_cache_data.write(way, set, word + 1, m_cmd_cas_wdata_fifo.read()); 6634 7166 } 6635 #endif 6636 if(m_cmd_cas_eop_fifo.read()) 6637 { 6638 r_cas_fsm = CAS_DIR_REQ; 6639 } 6640 else // we keep the last word in the FIFO 6641 { 6642 cmd_cas_fifo_get = true; 6643 } 6644 // We fill the two buffers 6645 if(r_cas_cpt.read() < 2) // 32 bits access 6646 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 6647 6648 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 6649 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6650 6651 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits... 
6652 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 6653 6654 if(r_cas_cpt.read() ==2) 6655 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 6656 6657 r_cas_cpt = r_cas_cpt.read() +1; 6658 } 6659 break; 6660 } 6661 6662 ///////////////// 6663 case CAS_DIR_REQ: 6664 { 6665 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 6666 { 6667 r_cas_fsm = CAS_DIR_LOCK; 6668 } 7167 7168 r_cas_upt_index = index; 7169 r_cas_fsm = CAS_BC_DIR_INVAL; 6669 7170 6670 7171 #if DEBUG_MEMC_CAS 6671 if(m_debug) 6672 { 6673 std::cout 6674 << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " 6675 << std::endl; 6676 } 6677 #endif 6678 break; 6679 } 6680 6681 ///////////////// 6682 case CAS_DIR_LOCK: // Read the directory 6683 { 6684 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6685 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 6686 6687 size_t way = 0; 6688 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); 6689 6690 r_cas_is_cnt = entry.is_cnt; 6691 r_cas_coherent = entry.cache_coherent; 6692 r_cas_dirty = entry.dirty; 6693 r_cas_tag = entry.tag; 6694 r_cas_way = way; 6695 r_cas_copy = entry.owner.srcid; 6696 r_cas_copy_inst = entry.owner.inst; 6697 r_cas_ptr = entry.ptr; 6698 r_cas_count = entry.count; 6699 6700 if(entry.valid) r_cas_fsm = CAS_DIR_HIT_READ; 6701 else r_cas_fsm = CAS_MISS_TRT_LOCK; 6702 6703 #if DEBUG_MEMC_CAS 6704 if(m_debug) 6705 { 6706 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 6707 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6708 << " / hit = " << std::dec << entry.valid 6709 << " / count = " << entry.count 6710 << " / is_cnt = " << entry.is_cnt << std::endl; 6711 } 6712 #endif 6713 break; 6714 } 6715 ///////////////////// 6716 case CAS_DIR_HIT_READ: // update directory for lock and dirty bit 6717 // and check data change in cache 6718 { 6719 size_t way = r_cas_way.read(); 6720 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6721 6722 // update directory (lock & dirty bits) 6723 DirectoryEntry entry; 6724 entry.valid = true; 6725 entry.cache_coherent = r_cas_coherent.read(); 6726 entry.is_cnt = r_cas_is_cnt.read(); 6727 entry.dirty = true; 6728 entry.lock = true; 6729 entry.tag = r_cas_tag.read(); 6730 entry.owner.srcid = r_cas_copy.read(); 6731 entry.owner.inst = r_cas_copy_inst.read(); 6732 entry.count = r_cas_count.read(); 6733 entry.ptr = r_cas_ptr.read(); 6734 6735 m_cache_directory.write(set, way, entry); 6736 6737 // Stored data from cache in buffer to do the comparison in next state 6738 m_cache_data.read_line(way, set, r_cas_data); 6739 6740 r_cas_fsm = CAS_DIR_HIT_COMPARE; 6741 6742 #if DEBUG_MEMC_CAS 6743 if(m_debug) 6744 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_READ> Read data from " 6745 << " cache and store it in buffer" << std::endl; 6746 #endif 6747 break; 6748 } 6749 6750 case CAS_DIR_HIT_COMPARE: 6751 { 6752 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6753 6754 // Read data in buffer & check data change 6755 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 6756 6757 if(r_cas_cpt.read() == 4) // 64 bits CAS 6758 ok &= (r_cas_rdata[1] == r_cas_data[word+1]); 6759 6760 // to avoid livelock, force the atomic access to fail pseudo-randomly 6761 bool forced_fail = ((r_cas_lfsr % (64) == 0) and RANDOMIZE_CAS); 6762 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 6763 6764 // cas success 6765 if(ok and not forced_fail) 6766 { 6767 r_cas_fsm = CAS_DIR_HIT_WRITE; 6768 } 6769 // cas failure 6770 else 6771 { 6772 r_cas_fsm = 
CAS_RSP_FAIL; 6773 } 6774 6775 #if DEBUG_MEMC_CAS 6776 if(m_debug) 6777 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare the old" 6778 << " and the new data" 6779 << " / expected value = " << r_cas_rdata[0].read() 6780 << " / actual value = " << r_cas_data[word].read() 6781 << " / forced_fail = " << forced_fail << std::endl; 6782 #endif 6783 break; 6784 } 6785 ////////////////////// 6786 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 6787 // write data in cache if no CC request 6788 { 6789 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 6790 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 6791 6792 // The CAS is a success => sw access to the llsc_global_table 6793 m_llsc_table.sw(m_cmd_cas_addr_fifo.read(), m_cmd_cas_addr_fifo.read()); 6794 // test coherence request 6795 if(r_cas_count.read()) // replicated line 6796 { 6797 if(r_cas_is_cnt.read()) 6798 { 6799 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 6800 #if DEBUG_MEMC_CAS 6801 if(m_debug) 6802 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6803 << " Broacast Inval required" 6804 << " / copies = " << r_cas_count.read() << std::endl; 6805 #endif 6806 6807 } 6808 else if(!r_cas_to_cc_send_multi_req.read() and 6809 !r_cas_to_cc_send_brdcast_req.read()) 6810 { 6811 r_cas_fsm = CAS_UPT_LOCK; // multi update required 6812 #if DEBUG_MEMC_CAS 6813 if(m_debug) 6814 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6815 << " Multi Inval required" 6816 << " / copies = " << r_cas_count.read() << std::endl; 6817 #endif 6818 } 6819 else 6820 { 6821 r_cas_fsm = CAS_WAIT; 6822 #if DEBUG_MEMC_CAS 6823 if(m_debug) 6824 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 6825 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 6826 #endif 6827 } 6828 } 6829 else // no copies 6830 { 6831 size_t way = r_cas_way.read(); 6832 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6833 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6834 6835 // cache update 6836 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6837 if(r_cas_cpt.read() == 4) 6838 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6839 6840 r_cas_fsm = CAS_RSP_SUCCESS; 6841 6842 #if DEBUG_MEMC_CAS 6843 if(m_debug) 6844 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE> Update cache:" 6845 << " way = " << std::dec << way 6846 << " / set = " << set 6847 << " / word = " << word 6848 << " / value = " << r_cas_wdata.read() 6849 << " / count = " << r_cas_count.read() 6850 << " / global_llsc_table access" << std::endl; 6851 #endif 6852 } 6853 break; 6854 } 6855 ///////////////// 6856 case CAS_UPT_LOCK: // try to register the transaction in UPT 6857 // and write data in cache if successful registration 6858 // releases locks to retry later if UPT full 6859 { 6860 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 6861 { 6862 bool wok = false; 6863 size_t index = 0; 6864 size_t srcid = m_cmd_cas_srcid_fifo.read(); 6865 size_t trdid = m_cmd_cas_trdid_fifo.read(); 6866 size_t pktid = m_cmd_cas_pktid_fifo.read(); 6867 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6868 size_t nb_copies = r_cas_count.read(); 6869 6870 wok = m_upt.set(true, // it's an update transaction 6871 false, // it's not a broadcast 6872 true, // response required 6873 false, // no acknowledge required 6874 srcid, 6875 trdid, 6876 pktid, 6877 nline, 6878 nb_copies, 6879 index); 6880 if(wok) // coherence transaction registered in UPT 6881 { 6882 // cache update 6883 size_t way = 
r_cas_way.read(); 6884 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6885 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6886 6887 m_cache_data.write(way, set, word, r_cas_wdata.read()); 6888 if(r_cas_cpt.read() ==4) 6889 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 6890 6891 r_cas_upt_index = index; 6892 r_cas_fsm = CAS_UPT_HEAP_LOCK; 6893 6894 } 6895 else // releases the locks protecting UPT and DIR UPT full 6896 { 6897 r_cas_fsm = CAS_WAIT; 6898 } 6899 6900 #if DEBUG_MEMC_CAS 6901 if(m_debug) 6902 std::cout << " <MEMC " << name() 6903 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 6904 << " / wok = " << wok 6905 << " / nline = " << std::hex << nline 6906 << " / count = " << nb_copies << std::endl; 6907 #endif 6908 } 6909 break; 6910 } 6911 ///////////// 6912 case CAS_WAIT: // release all locks and retry from beginning 6913 { 6914 6915 #if DEBUG_MEMC_CAS 6916 if(m_debug) 6917 { 6918 std::cout << " <MEMC " << name() 6919 << " CAS_WAIT> Release all locks" << std::endl; 6920 } 6921 #endif 6922 r_cas_fsm = CAS_DIR_REQ; 6923 break; 6924 } 6925 ////////////////// 6926 case CAS_UPT_HEAP_LOCK: // lock the heap 6927 { 6928 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 6929 { 6930 6931 #if DEBUG_MEMC_CAS 6932 if(m_debug) 7172 if (m_debug) 6933 7173 { 6934 7174 std::cout << " <MEMC " << name() 6935 << " CAS_UPT_HEAP_LOCK> Get access to the heap" << std::endl; 7175 << " CAS_BC_IVT_LOCK> Register a broadcast inval transaction in IVT" 7176 << " / nline = " << std::hex << nline 7177 << " / count = " << std::dec << nb_copies 7178 << " / ivt_index = " << index << std::endl; 6936 7179 } 6937 7180 #endif 6938 r_cas_fsm = CAS_UPT_REQ; 6939 } 6940 break; 6941 } 6942 //////////////// 6943 case CAS_UPT_REQ: // send a first update request to CC_SEND FSM 6944 { 6945 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) and 6946 "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 6947 6948 if(!r_cas_to_cc_send_multi_req.read() and !r_cas_to_cc_send_brdcast_req.read()) 6949 { 6950 r_cas_to_cc_send_brdcast_req = false; 6951 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 6952 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6953 r_cas_to_cc_send_index = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 6954 r_cas_to_cc_send_wdata = r_cas_wdata.read(); 6955 6956 if(r_cas_cpt.read() == 4) 6957 { 6958 r_cas_to_cc_send_is_long = true; 6959 r_cas_to_cc_send_wdata_high = m_cmd_cas_wdata_fifo.read(); 6960 } 6961 else 6962 { 6963 r_cas_to_cc_send_is_long = false; 6964 r_cas_to_cc_send_wdata_high = 0; 6965 } 6966 6967 // We put the first copy in the fifo 6968 cas_to_cc_send_fifo_put = true; 6969 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 6970 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 6971 if(r_cas_count.read() == 1) // one single copy 6972 { 6973 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 6974 // update responses 6975 cmd_cas_fifo_get = true; 6976 r_cas_to_cc_send_multi_req = true; 6977 r_cas_cpt = 0; 6978 } 6979 else // several copies 6980 { 6981 r_cas_fsm = CAS_UPT_NEXT; 6982 } 7181 } 7182 else // releases the lock protecting IVT 7183 { 7184 r_cas_fsm = CAS_WAIT; 7185 } 7186 } 7187 break; 7188 } 7189 ////////////////////// 7190 case CAS_BC_DIR_INVAL: // Register PUT transaction in TRT, 7191 // and inval the DIR entry 7192 { 7193 assert((r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 7194 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 7195 7196 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 7197 "MEMC ERROR in 
CAS_BC_DIR_INVAL state: Bad TRT allocation"); 7198 7199 assert((r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 7200 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 7201 7202 // set TRT 7203 std::vector<data_t> data_vector; 7204 data_vector.clear(); 7205 size_t word = m_x[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7206 for (size_t i = 0; i < m_words; i++) 7207 { 7208 if (i == word) 7209 { 7210 // first modified word 7211 data_vector.push_back(r_cas_wdata.read()); 7212 } 7213 else if ((i == word + 1) and (r_cas_cpt.read() == 4)) 7214 { 7215 // second modified word 7216 data_vector.push_back(m_cmd_cas_wdata_fifo.read()); 7217 } 7218 else 7219 { 7220 // unmodified words 7221 data_vector.push_back(r_cas_data[i].read()); 7222 } 7223 } 7224 m_trt.set(r_cas_trt_index.read(), 7225 false, // PUT request 7226 m_nline[(addr_t) (m_cmd_cas_addr_fifo.read())], 7227 0, 7228 0, 7229 0, 7230 false, // not a processor read 7231 0, 7232 0, 7233 std::vector<be_t> (m_words, 0), 7234 data_vector); 7235 7236 // invalidate directory entry 7237 DirectoryEntry entry; 7238 entry.valid = false; 7239 entry.dirty = false; 7240 entry.tag = 0; 7241 entry.is_cnt = false; 7242 entry.lock = false; 7243 entry.count = 0; 7244 entry.owner.srcid = 0; 7245 entry.owner.inst = false; 7246 entry.ptr = 0; 7247 size_t set = m_y[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7248 size_t way = r_cas_way.read(); 7249 7250 m_cache_directory.write(set, way, entry); 7251 7252 r_cas_fsm = CAS_BC_CC_SEND; 6983 7253 6984 7254 #if DEBUG_MEMC_CAS 6985 if(m_debug) 6986 { 6987 std::cout << " <MEMC " << name() << " CAS_UPT_REQ> Send the first update request to CC_SEND FSM " 6988 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 6989 << " / wdata = " << std::hex << r_cas_wdata.read() 6990 << " / srcid = " << std::dec << r_cas_copy.read() 6991 << " / inst = " << std::dec << r_cas_copy_inst.read() << std::endl; 6992 } 6993 #endif 6994 } 6995 break; 6996 } 6997 ///////////////// 6998 case CAS_UPT_NEXT: // send a multi-update request to CC_SEND FSM 6999 { 7000 assert((r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS) 7001 and "VCI_MEM_CACHE ERROR : bad HEAP allocation"); 7002 7003 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 7004 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 7005 cas_to_cc_send_fifo_inst = entry.owner.inst; 7006 cas_to_cc_send_fifo_put = true; 7007 7008 if(m_cas_to_cc_send_inst_fifo.wok()) // request accepted by CC_SEND FSM 7009 { 7010 r_cas_ptr = entry.next; 7011 if(entry.next == r_cas_ptr.read()) // last copy 7012 { 7013 r_cas_to_cc_send_multi_req = true; 7014 r_cas_fsm = CAS_IDLE; // Response will be sent after receiving 7015 // all update responses 7016 cmd_cas_fifo_get = true; 7017 r_cas_cpt = 0; 7018 } 7019 } 7255 if (m_debug) 7256 { 7257 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 7258 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 7259 } 7260 #endif 7261 break; 7262 } 7263 /////////////////// 7264 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 7265 { 7266 if (not r_cas_to_cc_send_multi_req.read() and 7267 not r_cas_to_cc_send_brdcast_req.read()) 7268 { 7269 r_cas_to_cc_send_multi_req = false; 7270 r_cas_to_cc_send_brdcast_req = true; 7271 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 7272 r_cas_to_cc_send_nline = m_nline[(addr_t) (m_cmd_cas_addr_fifo.read())]; 7273 r_cas_to_cc_send_index = 0; 7274 r_cas_to_cc_send_wdata = 0; 7275 7276 r_cas_fsm = CAS_BC_XRAM_REQ; 7020 7277 7021 7278 #if DEBUG_MEMC_CAS 7022 if(m_debug) 7023 { 7024 std::cout 
<< " <MEMC " << name() << " CAS_UPT_NEXT> Send the next update request to CC_SEND FSM " 7025 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 7026 << " / wdata = " << std::hex << r_cas_wdata.read() 7027 << " / srcid = " << std::dec << entry.owner.srcid 7028 << " / inst = " << std::dec << entry.owner.inst << std::endl; 7029 } 7030 #endif 7031 break; 7032 } 7033 ///////////////////// 7034 case CAS_BC_TRT_LOCK: // check the TRT to register a PUT transaction 7035 { 7036 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 7037 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 7038 7039 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 7040 { 7041 size_t wok_index = 0; 7042 bool wok = !m_trt.full(wok_index); 7043 if( wok ) 7044 { 7045 r_cas_trt_index = wok_index; 7046 r_cas_fsm = CAS_BC_IVT_LOCK; 7047 } 7048 else 7049 { 7050 r_cas_fsm = CAS_WAIT; 7051 } 7279 if (m_debug) 7280 { 7281 std::cout << " <MEMC " << name() 7282 << " CAS_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 7283 } 7284 #endif 7285 } 7286 break; 7287 } 7288 //////////////////// 7289 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a PUT transaction 7290 { 7291 if (not r_cas_to_ixr_cmd_req.read()) 7292 { 7293 r_cas_to_ixr_cmd_req = true; 7294 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 7295 r_cas_fsm = CAS_IDLE; 7296 cmd_cas_fifo_get = true; 7297 r_cas_cpt = 0; 7052 7298 7053 7299 #if DEBUG_MEMC_CAS 7054 if(m_debug) 7055 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 7056 << " : wok = " << wok << " / index = " << wok_index << std::endl; 7057 #endif 7058 } 7059 break; 7060 } 7061 ///////////////////// 7062 case CAS_BC_IVT_LOCK: // register a broadcast inval transaction in IVT 7063 // write data in cache in case of successful registration 7064 { 7065 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) 7066 { 7067 bool wok = false; 7068 size_t index = 0; 7069 size_t srcid = m_cmd_cas_srcid_fifo.read(); 7070 size_t trdid = m_cmd_cas_trdid_fifo.read(); 7071 size_t pktid = m_cmd_cas_pktid_fifo.read(); 7072 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7073 size_t nb_copies = r_cas_count.read(); 7074 7075 // register a broadcast inval transaction in IVT 7076 wok = m_ivt.set(false, // it's an inval transaction 7077 true, // it's a broadcast 7078 true, // response required 7079 false, // no acknowledge required 7080 srcid, 7081 trdid, 7082 pktid, 7083 nline, 7084 nb_copies, 7085 index); 7086 7087 if(wok) // IVT not full 7088 { 7089 // cache update 7090 size_t way = r_cas_way.read(); 7091 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7092 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7093 7094 m_cache_data.write(way, set, word, r_cas_wdata.read()); 7095 if(r_cas_cpt.read() ==4) 7096 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 7097 7098 r_cas_upt_index = index; 7099 r_cas_fsm = CAS_BC_DIR_INVAL; 7300 if (m_debug) 7301 { 7302 std::cout << " <MEMC " << name() 7303 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 7304 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 7305 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 7306 } 7307 #endif 7308 } 7309 break; 7310 } 7311 ///////////////// 7312 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 7313 { 7314 if (not r_cas_to_tgt_rsp_req.read()) 7315 { 7316 cmd_cas_fifo_get = true; 7317 r_cas_cpt = 0; 7318 r_cas_to_tgt_rsp_req = true; 7319 r_cas_to_tgt_rsp_data = 1; 7320 r_cas_to_tgt_rsp_srcid = 
m_cmd_cas_srcid_fifo.read(); 7321 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 7322 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 7323 r_cas_fsm = CAS_IDLE; 7100 7324 7101 7325 #if DEBUG_MEMC_CAS 7102 if(m_debug) 7103 std::cout << " <MEMC " << name() 7104 << " CAS_BC_IVT_LOCK> Register a broadcast inval transaction in IVT" 7105 << " / nline = " << std::hex << nline 7106 << " / count = " << std::dec << nb_copies 7107 << " / ivt_index = " << index << std::endl; 7108 #endif 7109 } 7110 else // releases the lock protecting IVT 7111 { 7112 r_cas_fsm = CAS_WAIT; 7113 } 7114 } 7115 break; 7116 } 7117 ////////////////////// 7118 case CAS_BC_DIR_INVAL: // Register the PUT transaction in TRT, and inval the DIR entry 7119 { 7120 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 7121 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 7122 7123 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 7124 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 7125 7126 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 7127 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 7128 7129 std::vector<data_t> data_vector; 7130 data_vector.clear(); 7131 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7132 for(size_t i=0; i<m_words; i++) 7133 { 7134 if(i == word) // first modified word 7135 data_vector.push_back( r_cas_wdata.read() ); 7136 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 7137 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 7138 else // unmodified words 7139 data_vector.push_back( r_cas_data[i].read() ); 7140 } 7141 m_trt.set( r_cas_trt_index.read(), 7142 false, // PUT request 7143 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 7144 0, 7145 0, 7146 0, 7147 false, // not a processor read 7148 0, 7149 0, 7150 std::vector<be_t> (m_words,0), 7151 data_vector ); 7152 7153 // invalidate directory entry 7154 DirectoryEntry entry; 7155 entry.valid = false; 7156 entry.dirty = false; 7157 entry.tag = 0; 7158 entry.is_cnt = false; 7159 entry.lock = false; 7160 entry.count = 0; 7161 entry.owner.srcid = 0; 7162 entry.owner.inst = false; 7163 entry.ptr = 0; 7164 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7165 size_t way = r_cas_way.read(); 7166 m_cache_directory.write(set, way, entry); 7167 7168 r_cas_fsm = CAS_BC_CC_SEND; 7326 if (m_debug) 7327 { 7328 std::cout << " <MEMC " << name() 7329 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 7330 } 7331 #endif 7332 } 7333 break; 7334 } 7335 //////////////////// 7336 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 7337 { 7338 if (not r_cas_to_tgt_rsp_req.read()) 7339 { 7340 cmd_cas_fifo_get = true; 7341 r_cas_cpt = 0; 7342 r_cas_to_tgt_rsp_req = true; 7343 r_cas_to_tgt_rsp_data = 0; 7344 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 7345 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 7346 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 7347 r_cas_fsm = CAS_IDLE; 7169 7348 7170 7349 #if DEBUG_MEMC_CAS 7171 if(m_debug) 7172 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 7173 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 7174 #endif 7175 break; 7176 } 7177 /////////////////// 7178 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 7179 { 7180 if( not r_cas_to_cc_send_multi_req.read() and 7181 not r_cas_to_cc_send_brdcast_req.read()) 7182 { 7183 r_cas_to_cc_send_multi_req = false; 7184 r_cas_to_cc_send_brdcast_req = true; 7185 
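CAS_BC_DIR_INVAL above prepares the PUT data for the broadcast-invalidate path by patching the modified word (or two words for a 64-bit CAS, i.e. r_cas_cpt == 4) into the line captured in r_cas_data, then hands the whole line to m_trt.set() with a null byte-enable vector. The same merge as a stand-alone, hypothetical helper:

#include <cstdint>
#include <cstddef>
#include <vector>

typedef std::uint32_t data_t;

// Build the PUT data for the CAS broadcast-invalidate path: the cached line
// with the CAS result patched in. Function and parameter names are illustrative.
std::vector<data_t> build_put_data(const std::vector<data_t>& line,   // r_cas_data
                                   std::size_t word,                  // m_x[address]
                                   data_t      wdata_low,             // r_cas_wdata
                                   data_t      wdata_high,            // second data flit
                                   bool        is_64bit)              // r_cas_cpt == 4
{
    std::vector<data_t> put(line);
    put[word] = wdata_low;                        // first modified word
    if (is_64bit)
    {
        put[word + 1] = wdata_high;               // second modified word
    }
    return put;                                   // passed to m_trt.set() as data_vector
}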
r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 7186 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 7187 r_cas_to_cc_send_index = 0; 7188 r_cas_to_cc_send_wdata = 0; 7189 7190 r_cas_fsm = CAS_BC_XRAM_REQ; 7191 } 7192 break; 7193 } 7194 //////////////////// 7195 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a put transaction 7196 { 7197 if( not r_cas_to_ixr_cmd_req.read() ) 7198 { 7199 r_cas_to_ixr_cmd_req = true; 7200 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 7201 r_cas_fsm = CAS_IDLE; 7202 cmd_cas_fifo_get = true; 7203 r_cas_cpt = 0; 7350 if (m_debug) 7351 { 7352 std::cout << " <MEMC " << name() 7353 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 7354 } 7355 #endif 7356 } 7357 break; 7358 } 7359 /////////////////////// 7360 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 7361 { 7362 if (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 7363 { 7364 size_t index = 0; 7365 bool hit_read = m_trt.hit_read( 7366 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], index); 7367 bool hit_write = m_trt.hit_write( 7368 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 7369 bool wok = not m_trt.full(index); 7204 7370 7205 7371 #if DEBUG_MEMC_CAS 7206 if(m_debug) 7207 std::cout << " <MEMC " << name() 7208 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 7209 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 7210 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 7211 #endif 7212 } 7213 7214 break; 7215 } 7216 ///////////////// 7217 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 7218 { 7219 if( not r_cas_to_tgt_rsp_req.read() ) 7220 { 7221 cmd_cas_fifo_get = true; 7222 r_cas_cpt = 0; 7223 r_cas_to_tgt_rsp_req = true; 7224 r_cas_to_tgt_rsp_data = 1; 7225 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 7226 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 7227 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 7228 r_cas_fsm = CAS_IDLE; 7372 if (m_debug) 7373 { 7374 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 7375 << " / hit_read = " << hit_read 7376 << " / hit_write = " << hit_write 7377 << " / wok = " << wok 7378 << " / index = " << index << std::endl; 7379 } 7380 #endif 7381 7382 if (hit_read or !wok or hit_write) // missing line already requested or TRT full 7383 { 7384 r_cas_fsm = CAS_WAIT; 7385 } 7386 else 7387 { 7388 r_cas_trt_index = index; 7389 r_cas_fsm = CAS_MISS_TRT_SET; 7390 } 7391 } 7392 break; 7393 } 7394 ////////////////////// 7395 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 7396 { 7397 assert((r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 7398 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 7399 7400 std::vector<be_t> be_vector; 7401 std::vector<data_t> data_vector; 7402 be_vector.clear(); 7403 data_vector.clear(); 7404 for (size_t i = 0; i < m_words; i++) 7405 { 7406 be_vector.push_back(0); 7407 data_vector.push_back(0); 7408 } 7409 7410 m_trt.set(r_cas_trt_index.read(), 7411 true, // GET 7412 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 7413 m_cmd_cas_srcid_fifo.read(), 7414 m_cmd_cas_trdid_fifo.read(), 7415 m_cmd_cas_pktid_fifo.read(), 7416 false, // write request from processor 7417 0, 7418 0, 7419 be_vector, 7420 data_vector); 7421 7422 r_cas_fsm = CAS_MISS_XRAM_REQ; 7229 7423 7230 7424 #if DEBUG_MEMC_CAS 7231 if(m_debug) 7232 std::cout << " <MEMC " << name() 7233 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 7234 #endif 7235 } 
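On a directory miss (CAS_MISS_TRT_LOCK above) the GET to XRAM is only launched when the TRT has a free slot and no transaction for the same line is already pending; in every other case the FSM releases its locks and retries, the CAS command staying in its FIFO until a hit is finally obtained, as stated in the FSM header. The decision, written out as a small hypothetical helper where pending_get / pending_put / trt_full stand for m_trt.hit_read(), m_trt.hit_write() and m_trt.full():

// Illustrative helper, not RTL code: the branch taken in CAS_MISS_TRT_LOCK.
enum class CasMissAction { Retry, RegisterGet };

CasMissAction cas_miss_decision(bool pending_get, bool pending_put, bool trt_full)
{
    // Line already being fetched, already being written back, or no free TRT
    // entry: release the DIR/TRT locks and retry later (CAS_WAIT).
    if (pending_get or pending_put or trt_full)
    {
        return CasMissAction::Retry;
    }
    // Otherwise reserve the free TRT index and register the GET
    // (CAS_MISS_TRT_SET, then CAS_MISS_XRAM_REQ).
    return CasMissAction::RegisterGet;
}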
7236 break; 7237 } 7238 //////////////////// 7239 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 7240 { 7241 if( not r_cas_to_tgt_rsp_req.read() ) 7242 { 7243 cmd_cas_fifo_get = true; 7244 r_cas_cpt = 0; 7245 r_cas_to_tgt_rsp_req = true; 7246 r_cas_to_tgt_rsp_data = 0; 7247 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 7248 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 7249 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 7250 r_cas_fsm = CAS_IDLE; 7425 if (m_debug) 7426 { 7427 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register GET transaction in TRT" 7428 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 7429 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 7430 } 7431 #endif 7432 break; 7433 } 7434 ////////////////////// 7435 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM a GET request 7436 { 7437 if (not r_cas_to_ixr_cmd_req.read()) 7438 { 7439 r_cas_to_ixr_cmd_req = true; 7440 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 7441 r_cas_fsm = CAS_WAIT; 7251 7442 7252 7443 #if DEBUG_MEMC_CAS 7253 if(m_debug) 7254 std::cout << " <MEMC " << name() 7255 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 7256 #endif 7257 } 7258 break; 7259 } 7260 ///////////////////// 7261 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 7262 { 7263 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 7264 { 7265 size_t index = 0; 7266 bool hit_read = m_trt.hit_read( 7267 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 7268 bool hit_write = m_trt.hit_write( 7269 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 7270 bool wok = not m_trt.full(index); 7271 7272 #if DEBUG_MEMC_CAS 7273 if(m_debug) 7274 { 7275 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 7276 << " / hit_read = " << hit_read 7277 << " / hit_write = " << hit_write 7278 << " / wok = " << wok 7279 << " / index = " << index << std::endl; 7280 } 7281 #endif 7282 7283 if(hit_read or !wok or hit_write) // missing line already requested or no space in TRT 7284 { 7285 r_cas_fsm = CAS_WAIT; 7286 } 7287 else 7288 { 7289 r_cas_trt_index = index; 7290 r_cas_fsm = CAS_MISS_TRT_SET; 7291 } 7292 } 7293 break; 7294 } 7295 //////////////////// 7296 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 7297 { 7298 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 7299 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 7300 7301 std::vector<be_t> be_vector; 7302 std::vector<data_t> data_vector; 7303 be_vector.clear(); 7304 data_vector.clear(); 7305 for(size_t i=0; i<m_words; i++) 7306 { 7307 be_vector.push_back(0); 7308 data_vector.push_back(0); 7309 } 7310 7311 m_trt.set(r_cas_trt_index.read(), 7312 true, // read request 7313 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 7314 m_cmd_cas_srcid_fifo.read(), 7315 m_cmd_cas_trdid_fifo.read(), 7316 m_cmd_cas_pktid_fifo.read(), 7317 false, // write request from processor 7318 0, 7319 0, 7320 be_vector, 7321 data_vector); 7322 r_cas_fsm = CAS_MISS_XRAM_REQ; 7323 7324 #if DEBUG_MEMC_CAS 7325 if(m_debug) 7326 { 7327 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register a GET transaction in TRT" << std::hex 7328 << " / nline = " << m_nline[(addr_t) m_cmd_cas_addr_fifo.read()] 7329 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 7330 } 7331 #endif 7332 break; 7333 } 7334 ////////////////////// 7335 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line 7336 { 7337 if( not 
r_cas_to_ixr_cmd_req.read() ) 7338 { 7339 r_cas_to_ixr_cmd_req = true; 7340 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 7341 r_cas_fsm = CAS_WAIT; 7342 7343 #if DEBUG_MEMC_CAS 7344 if(m_debug) 7345 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 7346 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 7347 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 7348 #endif 7349 } 7350 break; 7351 } 7444 if (m_debug) 7445 { 7446 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 7447 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 7448 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 7449 } 7450 #endif 7451 } 7452 break; 7453 } 7352 7454 } // end switch r_cas_fsm 7353 7455 … … 7381 7483 /////////////////////////////////////////////////////////////////////////////// 7382 7484 7383 switch (r_cc_send_fsm.read())7485 switch (r_cc_send_fsm.read()) 7384 7486 { 7385 7487 ///////////////////////// 7386 7488 case CC_SEND_CONFIG_IDLE: // XRAM_RSP FSM has highest priority 7387 { 7388 // XRAM_RSP 7389 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7390 r_xram_rsp_to_cc_send_multi_req.read()) 7391 { 7392 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7393 break; 7394 } 7395 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7396 { 7397 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7398 break; 7399 } 7400 // CAS 7401 if(m_cas_to_cc_send_inst_fifo.rok() or 7402 r_cas_to_cc_send_multi_req.read()) 7403 { 7404 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7405 break; 7406 } 7407 if(r_cas_to_cc_send_brdcast_req.read()) 7408 { 7409 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7410 break; 7411 } 7412 7413 // READ 7414 if(r_read_to_cc_send_req.read()) 7415 { 7416 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7417 break; 7418 } 7419 7420 // WRITE 7421 if(r_write_to_cc_send_req.read()) 7422 { 7423 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7424 break; 7425 } 7426 7427 if(m_write_to_cc_send_inst_fifo.rok() or 7428 r_write_to_cc_send_multi_req.read()) 7429 { 7430 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7431 break; 7432 } 7433 if(r_write_to_cc_send_brdcast_req.read()) 7434 { 7435 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7436 break; 7437 } 7438 // CONFIG 7439 if(r_config_to_cc_send_multi_req.read()) 7440 { 7441 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7442 break; 7443 } 7444 if(r_config_to_cc_send_brdcast_req.read()) 7445 { 7446 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7447 break; 7448 } 7489 { 7490 // XRAM_RSP 7491 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 7492 r_xram_rsp_to_cc_send_multi_req.read()) 7493 { 7494 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7449 7495 break; 7450 7496 } 7451 //////////////////////// 7452 case CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 7453 { 7454 // CONFIG 7455 if(r_config_to_cc_send_multi_req.read()) 7456 { 7457 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7458 break; 7459 } 7460 if(r_config_to_cc_send_brdcast_req.read()) 7461 { 7462 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7463 break; 7464 } 7465 // XRAM_RSP 7466 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7467 r_xram_rsp_to_cc_send_multi_req.read()) 7468 { 7469 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7470 break; 7471 } 7472 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7473 { 7474 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7475 break; 7476 } 7477 // CAS 7478 if(m_cas_to_cc_send_inst_fifo.rok() or 7479 
r_cas_to_cc_send_multi_req.read()) 7480 { 7481 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7482 break; 7483 } 7484 if(r_cas_to_cc_send_brdcast_req.read()) 7485 { 7486 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7487 break; 7488 } 7489 // READ 7490 if(r_read_to_cc_send_req.read()) 7491 { 7492 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7493 break; 7494 } 7495 // WRITE 7496 if(r_write_to_cc_send_req.read()) 7497 { 7498 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7499 break; 7500 } 7501 if(m_write_to_cc_send_inst_fifo.rok() or 7502 r_write_to_cc_send_multi_req.read()) 7503 { 7504 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7505 break; 7506 } 7507 if(r_write_to_cc_send_brdcast_req.read()) 7508 { 7509 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7510 break; 7511 } 7497 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 7498 { 7499 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7512 7500 break; 7513 7501 } 7514 /////////////////////////// 7515 case CC_SEND_READ_IDLE: 7516 { 7517 // WRITE 7518 if(r_write_to_cc_send_req.read()) 7519 { 7520 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7521 break; 7522 } 7523 if(m_write_to_cc_send_inst_fifo.rok() or 7524 r_write_to_cc_send_multi_req.read()) 7525 { 7526 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7527 break; 7528 } 7529 if(r_write_to_cc_send_brdcast_req.read()) 7530 { 7531 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7532 break; 7533 } 7534 // CONFIG 7535 if(r_config_to_cc_send_multi_req.read()) 7536 { 7537 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7538 break; 7539 } 7540 if(r_config_to_cc_send_brdcast_req.read()) 7541 { 7542 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7543 break; 7544 } 7545 // XRAM_RSP 7546 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7547 r_xram_rsp_to_cc_send_multi_req.read()) 7548 { 7549 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7550 break; 7551 } 7552 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7553 { 7554 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7555 break; 7556 } 7557 // CAS 7558 if(m_cas_to_cc_send_inst_fifo.rok() or 7559 r_cas_to_cc_send_multi_req.read()) 7560 { 7561 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7562 break; 7563 } 7564 if(r_cas_to_cc_send_brdcast_req.read()) 7565 { 7566 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7567 break; 7568 } 7569 // READ 7570 if(r_read_to_cc_send_req.read()) 7571 { 7572 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7573 break; 7574 } 7575 break; 7576 7577 } 7578 /////////////////////////// 7579 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 7580 { 7581 // CAS 7582 if(m_cas_to_cc_send_inst_fifo.rok() or 7583 r_cas_to_cc_send_multi_req.read()) 7584 { 7585 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7586 break; 7587 } 7588 if(r_cas_to_cc_send_brdcast_req.read()) 7589 { 7590 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7591 break; 7592 } 7593 7594 // READ 7595 if(r_read_to_cc_send_req.read()) 7596 { 7597 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7598 break; 7599 } 7600 7601 // WRITE 7602 if(r_write_to_cc_send_req.read()) 7603 { 7604 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7605 break; 7606 } 7607 if(m_write_to_cc_send_inst_fifo.rok() or 7608 r_write_to_cc_send_multi_req.read()) 7609 { 7610 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7611 break; 7612 } 7613 7614 if(r_write_to_cc_send_brdcast_req.read()) 7615 { 7616 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7617 break; 7618 } 7619 // CONFIG 7620 if(r_config_to_cc_send_multi_req.read()) 7621 { 7622 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7623 break; 7624 } 7625 
if(r_config_to_cc_send_brdcast_req.read()) 7626 { 7627 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7628 break; 7629 } 7630 // XRAM_RSP 7631 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7632 r_xram_rsp_to_cc_send_multi_req.read()) 7633 { 7634 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7635 break; 7636 } 7637 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7638 { 7639 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7640 break; 7641 } 7642 break; 7643 } 7644 ////////////////////// 7645 case CC_SEND_CAS_IDLE: // READ FSM has highest priority 7646 { 7647 7648 // READ 7649 if(r_read_to_cc_send_req.read()) 7650 { 7651 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7652 break; 7653 } 7654 // WRITE 7655 if(r_write_to_cc_send_req.read()) 7656 { 7657 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7658 break; 7659 } 7660 7661 if(m_write_to_cc_send_inst_fifo.rok() or 7662 r_write_to_cc_send_multi_req.read()) 7663 { 7664 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7665 break; 7666 } 7667 if(r_write_to_cc_send_brdcast_req.read()) 7668 { 7669 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7670 break; 7671 } 7672 // CONFIG 7673 if(r_config_to_cc_send_multi_req.read()) 7674 { 7675 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7676 break; 7677 } 7678 if(r_config_to_cc_send_brdcast_req.read()) 7679 { 7680 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7681 break; 7682 } 7683 // XRAM RSP 7684 if(m_xram_rsp_to_cc_send_inst_fifo.rok() or 7685 r_xram_rsp_to_cc_send_multi_req.read()) 7686 { 7687 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7688 break; 7689 } 7690 if(r_xram_rsp_to_cc_send_brdcast_req.read()) 7691 { 7692 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7693 break; 7694 } 7695 // CAS 7696 if(m_cas_to_cc_send_inst_fifo.rok() or 7697 r_cas_to_cc_send_multi_req.read()) 7698 { 7699 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7700 break; 7701 } 7702 if(r_cas_to_cc_send_brdcast_req.read()) 7703 { 7704 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7705 break; 7706 } 7707 break; 7708 } 7709 ///////////////////////////////// 7710 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 7711 { 7712 if(m_config_to_cc_send_inst_fifo.rok()) 7713 { 7714 if(not p_dspin_m2p.read) break; 7715 7716 // <Activity Counters> 7717 if (is_local_req(m_config_to_cc_send_srcid_fifo.read())) 7718 { 7719 m_cpt_minval_local++; 7720 } 7721 else 7722 { 7723 m_cpt_minval_remote++; 7724 } 7725 // 2 flits for multi inval 7726 m_cpt_minval_cost += 2 * req_distance(m_config_to_cc_send_srcid_fifo.read()); 7727 // </Activity Counters> 7728 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 7729 break; 7730 } 7731 if(r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 7732 // <Activity Counters> 7733 m_cpt_minval++; 7734 // </Activity Counters> 7735 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7736 break; 7737 } 7738 //////////////////////////////// 7739 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from CONFIG FSM) 7740 { 7741 if(not p_dspin_m2p.read) break; 7742 config_to_cc_send_fifo_get = true; 7743 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7744 7745 #if DEBUG_MEMC_CC_SEND 7746 if(m_debug) 7747 std::cout << " <MEMC " << name() 7748 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 7749 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7750 #endif 7751 break; 7752 } 7753 /////////////////////////////////// 7754 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 7755 { 7756 if(not p_dspin_m2p.read) break; 7757 r_cc_send_fsm = 
CC_SEND_CONFIG_BRDCAST_NLINE; 7758 break; 7759 } 7760 ////////////////////////////////// 7761 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 7762 { 7763 if(not p_dspin_m2p.read) break; 7764 // <Activity Counters> 7765 m_cpt_binval++; 7766 // </Activity Counters> 7767 r_config_to_cc_send_brdcast_req = false; 7768 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7769 7770 #if DEBUG_MEMC_CC_SEND 7771 if(m_debug) 7772 std::cout << " <MEMC " << name() 7773 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 7774 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7775 #endif 7776 break; 7777 } 7778 /////////////////////////////////// 7779 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 7780 { 7781 if(m_xram_rsp_to_cc_send_inst_fifo.rok()) 7782 { 7783 if(not p_dspin_m2p.read) break; 7784 // <Activity Counters> 7785 if (is_local_req(m_xram_rsp_to_cc_send_srcid_fifo.read())) 7786 { 7787 m_cpt_minval_local++; 7788 } 7789 else 7790 { 7791 m_cpt_minval_remote++; 7792 } 7793 // 2 flits for multi inval 7794 m_cpt_minval_cost += 2 * req_distance(m_xram_rsp_to_cc_send_srcid_fifo.read()); 7795 // </Activity Counters> 7796 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 7797 break; 7798 } 7799 if(r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 7800 // <Activity Counters> 7801 m_cpt_minval++; 7802 // </Activity Counters> 7803 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7804 break; 7805 } 7806 ////////////////////////////////// 7807 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 7808 { 7809 if(not p_dspin_m2p.read) break; 7810 xram_rsp_to_cc_send_fifo_get = true; 7811 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7812 7813 #if DEBUG_MEMC_CC_SEND 7814 if(m_debug) 7815 std::cout << " <MEMC " << name() 7816 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 7817 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7818 #endif 7819 break; 7820 } 7821 ///////////////////////////////////// 7822 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 7823 { 7824 if(not p_dspin_m2p.read) break; 7825 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 7826 break; 7827 } 7828 //////////////////////////////////// 7829 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 7830 { 7831 if(not p_dspin_m2p.read) break; 7832 // <Activity Counters> 7833 m_cpt_binval++; 7834 // </Activity Counters> 7835 r_xram_rsp_to_cc_send_brdcast_req = false; 7836 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7837 7838 #if DEBUG_MEMC_CC_SEND 7839 if(m_debug) 7840 std::cout << " <MEMC " << name() 7841 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 7842 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7843 #endif 7844 break; 7845 } 7846 7847 case CC_SEND_READ_NCC_INVAL_HEADER: 7848 { 7849 if(not p_dspin_m2p.read) break; 7850 7851 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_NLINE; 7852 break; 7853 } 7854 7855 case CC_SEND_READ_NCC_INVAL_NLINE: 7856 { 7857 if(not p_dspin_m2p.read) break; 7858 7859 r_read_to_cc_send_req = false; 7860 r_cc_send_fsm = CC_SEND_READ_IDLE; 7861 7862 #if DEBUG_MEMC_CC_SEND 7863 if(m_debug) 7864 { 7865 std::cout 7866 << " <MEMC " << name() 7867 << " CC_SEND_READ_NCC_INVAL_HEADER> Inval for line " 7868 << std::hex <<r_read_to_cc_send_nline.read() << std::dec 7869 << std::endl; 7870 } 7871 #endif 7872 break; 7873 } 7874 7875 7876 case CC_SEND_WRITE_NCC_INVAL_HEADER: 7877 { 
7878 if(not p_dspin_m2p.read) break; 7879 7880 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_NLINE; 7881 break; 7882 } 7883 7884 case CC_SEND_WRITE_NCC_INVAL_NLINE: 7885 { 7886 if(not p_dspin_m2p.read) break; 7887 7888 r_write_to_cc_send_req = false; 7889 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7890 7891 #if DEBUG_MEMC_CC_SEND 7892 if(m_debug) 7893 { 7894 std::cout 7895 << " <MEMC " << name() 7896 << " CC_SEND_WRITE_NCC_INVAL_HEADER> Inval for line " 7897 << std::hex << r_write_to_cc_send_nline.read() << std::dec 7898 << std::endl; 7899 } 7900 #endif 7901 break; 7902 } 7903 7904 7905 ////////////////////////////////// 7906 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 7907 { 7908 if(not p_dspin_m2p.read) break; 7909 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 7910 break; 7911 } 7912 ///////////////////////////////// 7913 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 7914 { 7915 if(not p_dspin_m2p.read) break; 7916 7917 // <Activity Counters> 7918 m_cpt_binval++; 7919 // </Activity Counters> 7920 7921 r_write_to_cc_send_brdcast_req = false; 7922 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7923 7924 #if DEBUG_MEMC_CC_SEND 7925 if(m_debug) 7926 std::cout << " <MEMC " << name() 7927 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 7928 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 7929 #endif 7930 break; 7931 } 7932 /////////////////////////////// 7933 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 7934 { 7935 if(m_write_to_cc_send_inst_fifo.rok()) 7936 { 7937 if(not p_dspin_m2p.read) break; 7938 // <Activity Counters> 7939 if (is_local_req(m_write_to_cc_send_srcid_fifo.read())) 7940 { 7941 m_cpt_update_local++; 7942 } 7943 else 7944 { 7945 m_cpt_update_remote++; 7946 } 7947 // 2 flits for multi inval 7948 m_cpt_update_cost += 2 * req_distance(m_write_to_cc_send_srcid_fifo.read()); 7949 // </Activity Counters> 7950 7951 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 7952 break; 7953 } 7954 7955 if(r_write_to_cc_send_multi_req.read()) 7956 { 7957 r_write_to_cc_send_multi_req = false; 7958 } 7959 7960 // <Activity Counters> 7961 m_cpt_update++; 7962 // </Activity Counters> 7963 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7964 break; 7965 } 7966 ////////////////////////////// 7967 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 7968 { 7969 if(not p_dspin_m2p.read) break; 7970 7971 r_cc_send_cpt = 0; 7972 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 7973 7974 #if DEBUG_MEMC_CC_SEND 7975 if(m_debug) 7976 std::cout << " <MEMC " << name() 7977 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for line " 7978 << r_write_to_cc_send_nline.read() << std::endl; 7979 #endif 7980 break; 7981 } 7982 ///////////////////////////// 7983 case CC_SEND_WRITE_UPDT_DATA: // send N data flits for a multi-update (from WRITE FSM) 7984 { 7985 if(not p_dspin_m2p.read) break; 7986 if(r_cc_send_cpt.read() == r_write_to_cc_send_count.read()) 7987 { 7988 write_to_cc_send_fifo_get = true; 7989 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7990 break; 7991 } 7992 7993 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 7994 break; 7995 } 7996 //////////////////////////////// 7997 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 7998 { 7999 if(not p_dspin_m2p.read) break; 8000 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 8001 break; 8002 } 8003 /////////////////////////////// 8004 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 8005 
{ 8006 if(not p_dspin_m2p.read) break; 8007 // <Activity Counters> 8008 m_cpt_binval++; 8009 // </Activity Counters> 8010 8011 r_cas_to_cc_send_brdcast_req = false; 8012 r_cc_send_fsm = CC_SEND_CAS_IDLE; 8013 8014 #if DEBUG_MEMC_CC_SEND 8015 if(m_debug) 8016 std::cout << " <MEMC " << name() 8017 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for line " 8018 << r_cas_to_cc_send_nline.read() << std::endl; 8019 #endif 8020 break; 8021 } 8022 ///////////////////////////// 8023 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 8024 { 8025 if(m_cas_to_cc_send_inst_fifo.rok()) 8026 { 8027 if(not p_dspin_m2p.read) break; 8028 // <Activity Counters> 8029 if (is_local_req(m_cas_to_cc_send_srcid_fifo.read())) 8030 { 8031 m_cpt_update_local++; 8032 } 8033 else 8034 { 8035 m_cpt_update_remote++; 8036 } 8037 // 2 flits for multi inval 8038 m_cpt_update_cost += 2 * req_distance(m_cas_to_cc_send_srcid_fifo.read()); 8039 // </Activity Counters> 8040 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 8041 break; 8042 } 8043 8044 // no more packets to send for the multi-update 8045 if(r_cas_to_cc_send_multi_req.read()) 8046 { 8047 r_cas_to_cc_send_multi_req = false; 8048 } 8049 8050 // <Activity Counters> 8051 m_cpt_update++; 8052 // </Activity Counters> 8053 r_cc_send_fsm = CC_SEND_CAS_IDLE; 8054 break; 8055 } 8056 //////////////////////////// 8057 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 8058 { 8059 if(not p_dspin_m2p.read) break; 8060 r_cc_send_cpt = 0; 8061 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 8062 8063 #if DEBUG_MEMC_CC_SEND 8064 if(m_debug) 8065 std::cout << " <MEMC " << name() 8066 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for line " 8067 << r_cas_to_cc_send_nline.read() << std::endl; 8068 #endif 8069 break; 8070 } 8071 /////////////////////////// 8072 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 8073 { 8074 if(not p_dspin_m2p.read) break; 8075 8076 if(r_cas_to_cc_send_is_long.read()) 8077 { 8078 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 8079 break; 8080 } 8081 8082 cas_to_cc_send_fifo_get = true; 7502 // CAS 7503 if (m_cas_to_cc_send_inst_fifo.rok() or 7504 r_cas_to_cc_send_multi_req.read()) 7505 { 8083 7506 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 8084 7507 break; 8085 7508 } 8086 //////////////////////////////// 8087 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for a multi-update (from CAS FSM) 8088 { 8089 if(not p_dspin_m2p.read) break; 8090 cas_to_cc_send_fifo_get = true; 7509 if (r_cas_to_cc_send_brdcast_req.read()) 7510 { 7511 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7512 break; 7513 } 7514 7515 // READ 7516 if (r_read_to_cc_send_req.read()) 7517 { 7518 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7519 break; 7520 } 7521 7522 // WRITE 7523 if (r_write_to_cc_send_req.read()) 7524 { 7525 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7526 break; 7527 } 7528 7529 if (m_write_to_cc_send_inst_fifo.rok() or 7530 r_write_to_cc_send_multi_req.read()) 7531 { 7532 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7533 break; 7534 } 7535 if (r_write_to_cc_send_brdcast_req.read()) 7536 { 7537 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7538 break; 7539 } 7540 // CONFIG 7541 if (r_config_to_cc_send_multi_req.read()) 7542 { 7543 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7544 break; 7545 } 7546 if (r_config_to_cc_send_brdcast_req.read()) 7547 { 7548 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7549 break; 7550 } 7551 break; 7552 } 7553 //////////////////////// 7554 case 
CC_SEND_WRITE_IDLE: // CONFIG FSM has highest priority 7555 { 7556 // CONFIG 7557 if (r_config_to_cc_send_multi_req.read()) 7558 { 7559 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7560 break; 7561 } 7562 if (r_config_to_cc_send_brdcast_req.read()) 7563 { 7564 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7565 break; 7566 } 7567 // XRAM_RSP 7568 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 7569 r_xram_rsp_to_cc_send_multi_req.read()) 7570 { 7571 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7572 break; 7573 } 7574 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 7575 { 7576 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7577 break; 7578 } 7579 // CAS 7580 if (m_cas_to_cc_send_inst_fifo.rok() or 7581 r_cas_to_cc_send_multi_req.read()) 7582 { 8091 7583 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 8092 7584 break; 8093 7585 } 7586 if (r_cas_to_cc_send_brdcast_req.read()) 7587 { 7588 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7589 break; 7590 } 7591 // READ 7592 if (r_read_to_cc_send_req.read()) 7593 { 7594 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7595 break; 7596 } 7597 // WRITE 7598 if (r_write_to_cc_send_req.read()) 7599 { 7600 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7601 break; 7602 } 7603 if (m_write_to_cc_send_inst_fifo.rok() or 7604 r_write_to_cc_send_multi_req.read()) 7605 { 7606 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7607 break; 7608 } 7609 if (r_write_to_cc_send_brdcast_req.read()) 7610 { 7611 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7612 break; 7613 } 7614 break; 7615 } 7616 /////////////////////////// 7617 case CC_SEND_READ_IDLE: 7618 { 7619 // WRITE 7620 if (r_write_to_cc_send_req.read()) 7621 { 7622 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7623 break; 7624 } 7625 if (m_write_to_cc_send_inst_fifo.rok() or 7626 r_write_to_cc_send_multi_req.read()) 7627 { 7628 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7629 break; 7630 } 7631 if (r_write_to_cc_send_brdcast_req.read()) 7632 { 7633 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7634 break; 7635 } 7636 // CONFIG 7637 if (r_config_to_cc_send_multi_req.read()) 7638 { 7639 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7640 break; 7641 } 7642 if (r_config_to_cc_send_brdcast_req.read()) 7643 { 7644 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7645 break; 7646 } 7647 // XRAM_RSP 7648 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 7649 r_xram_rsp_to_cc_send_multi_req.read()) 7650 { 7651 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7652 break; 7653 } 7654 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 7655 { 7656 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7657 break; 7658 } 7659 // CAS 7660 if (m_cas_to_cc_send_inst_fifo.rok() or 7661 r_cas_to_cc_send_multi_req.read()) 7662 { 7663 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7664 break; 7665 } 7666 if (r_cas_to_cc_send_brdcast_req.read()) 7667 { 7668 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7669 break; 7670 } 7671 // READ 7672 if (r_read_to_cc_send_req.read()) 7673 { 7674 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7675 break; 7676 } 7677 break; 7678 } 7679 /////////////////////////// 7680 case CC_SEND_XRAM_RSP_IDLE: // CAS FSM has highest priority 7681 { 7682 // CAS 7683 if (m_cas_to_cc_send_inst_fifo.rok() or 7684 r_cas_to_cc_send_multi_req.read()) 7685 { 7686 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7687 break; 7688 } 7689 if (r_cas_to_cc_send_brdcast_req.read()) 7690 { 7691 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7692 break; 7693 } 7694 // READ 7695 if (r_read_to_cc_send_req.read()) 7696 { 7697 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7698 
break; 7699 } 7700 // WRITE 7701 if (r_write_to_cc_send_req.read()) 7702 { 7703 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7704 break; 7705 } 7706 if (m_write_to_cc_send_inst_fifo.rok() or 7707 r_write_to_cc_send_multi_req.read()) 7708 { 7709 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7710 break; 7711 } 7712 7713 if (r_write_to_cc_send_brdcast_req.read()) 7714 { 7715 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7716 break; 7717 } 7718 // CONFIG 7719 if (r_config_to_cc_send_multi_req.read()) 7720 { 7721 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7722 break; 7723 } 7724 if (r_config_to_cc_send_brdcast_req.read()) 7725 { 7726 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7727 break; 7728 } 7729 // XRAM_RSP 7730 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 7731 r_xram_rsp_to_cc_send_multi_req.read()) 7732 { 7733 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7734 break; 7735 } 7736 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 7737 { 7738 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7739 break; 7740 } 7741 break; 7742 } 7743 ////////////////////// 7744 case CC_SEND_CAS_IDLE: // READ FSM has highest priority 7745 { 7746 // READ 7747 if (r_read_to_cc_send_req.read()) 7748 { 7749 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_HEADER; 7750 break; 7751 } 7752 // WRITE 7753 if (r_write_to_cc_send_req.read()) 7754 { 7755 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_HEADER; 7756 break; 7757 } 7758 7759 if (m_write_to_cc_send_inst_fifo.rok() or 7760 r_write_to_cc_send_multi_req.read()) 7761 { 7762 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 7763 break; 7764 } 7765 if (r_write_to_cc_send_brdcast_req.read()) 7766 { 7767 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_HEADER; 7768 break; 7769 } 7770 // CONFIG 7771 if (r_config_to_cc_send_multi_req.read()) 7772 { 7773 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7774 break; 7775 } 7776 if (r_config_to_cc_send_brdcast_req.read()) 7777 { 7778 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_HEADER; 7779 break; 7780 } 7781 // XRAM RSP 7782 if (m_xram_rsp_to_cc_send_inst_fifo.rok() or 7783 r_xram_rsp_to_cc_send_multi_req.read()) 7784 { 7785 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7786 break; 7787 } 7788 if (r_xram_rsp_to_cc_send_brdcast_req.read()) 7789 { 7790 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_HEADER; 7791 break; 7792 } 7793 // CAS 7794 if (m_cas_to_cc_send_inst_fifo.rok() or 7795 r_cas_to_cc_send_multi_req.read()) 7796 { 7797 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 7798 break; 7799 } 7800 if (r_cas_to_cc_send_brdcast_req.read()) 7801 { 7802 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_HEADER; 7803 break; 7804 } 7805 break; 7806 } 7807 ///////////////////////////////// 7808 case CC_SEND_CONFIG_INVAL_HEADER: // send first flit multi-inval (from CONFIG FSM) 7809 { 7810 if (m_config_to_cc_send_inst_fifo.rok()) 7811 { 7812 if (not p_dspin_m2p.read) break; 7813 // <Activity Counters> 7814 if (is_local_req(m_config_to_cc_send_srcid_fifo.read())) 7815 { 7816 m_cpt_minval_local++; 7817 } 7818 else 7819 { 7820 m_cpt_minval_remote++; 7821 } 7822 // 2 flits for multi inval 7823 m_cpt_minval_cost += 2 * req_distance(m_config_to_cc_send_srcid_fifo.read()); 7824 // </Activity Counters> 7825 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_NLINE; 7826 break; 7827 } 7828 if (r_config_to_cc_send_multi_req.read()) r_config_to_cc_send_multi_req = false; 7829 // <Activity Counters> 7830 m_cpt_minval++; 7831 // </Activity Counters> 7832 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7833 break; 7834 } 7835 //////////////////////////////// 7836 case CC_SEND_CONFIG_INVAL_NLINE: // send second flit multi-inval (from 
CONFIG FSM) 7837 { 7838 if (not p_dspin_m2p.read) break; 7839 config_to_cc_send_fifo_get = true; 7840 r_cc_send_fsm = CC_SEND_CONFIG_INVAL_HEADER; 7841 7842 #if DEBUG_MEMC_CC_SEND 7843 if (m_debug) 7844 { 7845 std::cout << " <MEMC " << name() 7846 << " CC_SEND_CONFIG_INVAL_NLINE> multi-inval for line " 7847 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7848 } 7849 #endif 7850 break; 7851 } 7852 /////////////////////////////////// 7853 case CC_SEND_CONFIG_BRDCAST_HEADER: // send first flit BC-inval (from CONFIG FSM) 7854 { 7855 if (not p_dspin_m2p.read) break; 7856 r_cc_send_fsm = CC_SEND_CONFIG_BRDCAST_NLINE; 7857 break; 7858 } 7859 ////////////////////////////////// 7860 case CC_SEND_CONFIG_BRDCAST_NLINE: // send second flit BC-inval (from CONFIG FSM) 7861 { 7862 if (not p_dspin_m2p.read) break; 7863 // <Activity Counters> 7864 m_cpt_binval++; 7865 // </Activity Counters> 7866 r_config_to_cc_send_brdcast_req = false; 7867 r_cc_send_fsm = CC_SEND_CONFIG_IDLE; 7868 7869 #if DEBUG_MEMC_CC_SEND 7870 if (m_debug) 7871 std::cout << " <MEMC " << name() 7872 << " CC_SEND_CONFIG_BRDCAST_NLINE> BC-Inval for line " 7873 << std::hex << r_config_to_cc_send_nline.read() << std::endl; 7874 #endif 7875 break; 7876 } 7877 /////////////////////////////////// 7878 case CC_SEND_XRAM_RSP_INVAL_HEADER: // send first flit multi-inval (from XRAM_RSP FSM) 7879 { 7880 if (m_xram_rsp_to_cc_send_inst_fifo.rok()) 7881 { 7882 if (not p_dspin_m2p.read) break; 7883 // <Activity Counters> 7884 if (is_local_req(m_xram_rsp_to_cc_send_srcid_fifo.read())) 7885 { 7886 m_cpt_minval_local++; 7887 } 7888 else 7889 { 7890 m_cpt_minval_remote++; 7891 } 7892 // 2 flits for multi inval 7893 m_cpt_minval_cost += 2 * req_distance(m_xram_rsp_to_cc_send_srcid_fifo.read()); 7894 // </Activity Counters> 7895 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_NLINE; 7896 break; 7897 } 7898 if (r_xram_rsp_to_cc_send_multi_req.read()) r_xram_rsp_to_cc_send_multi_req = false; 7899 // <Activity Counters> 7900 m_cpt_minval++; 7901 // </Activity Counters> 7902 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7903 break; 7904 } 7905 ////////////////////////////////// 7906 case CC_SEND_XRAM_RSP_INVAL_NLINE: // send second flit multi-inval (from XRAM_RSP FSM) 7907 { 7908 if (not p_dspin_m2p.read) break; 7909 xram_rsp_to_cc_send_fifo_get = true; 7910 r_cc_send_fsm = CC_SEND_XRAM_RSP_INVAL_HEADER; 7911 7912 #if DEBUG_MEMC_CC_SEND 7913 if (m_debug) 7914 { 7915 std::cout << " <MEMC " << name() 7916 << " CC_SEND_XRAM_RSP_INVAL_NLINE> Multicast-Inval for line " 7917 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7918 } 7919 #endif 7920 break; 7921 } 7922 ///////////////////////////////////// 7923 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: // send first flit broadcast-inval (from XRAM_RSP FSM) 7924 { 7925 if (not p_dspin_m2p.read) break; 7926 r_cc_send_fsm = CC_SEND_XRAM_RSP_BRDCAST_NLINE; 7927 break; 7928 } 7929 //////////////////////////////////// 7930 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: // send second flit broadcast-inval (from XRAM_RSP FSM) 7931 { 7932 if (not p_dspin_m2p.read) break; 7933 // <Activity Counters> 7934 m_cpt_binval++; 7935 // </Activity Counters> 7936 r_xram_rsp_to_cc_send_brdcast_req = false; 7937 r_cc_send_fsm = CC_SEND_XRAM_RSP_IDLE; 7938 7939 #if DEBUG_MEMC_CC_SEND 7940 if (m_debug) 7941 { 7942 std::cout << " <MEMC " << name() 7943 << " CC_SEND_XRAM_RSP_BRDCAST_NLINE> BC-Inval for line " 7944 << std::hex << r_xram_rsp_to_cc_send_nline.read() << std::endl; 7945 } 7946 #endif 7947 break; 7948 } 7949 
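[Editor's note] The CC_SEND_*_INVAL_HEADER / *_INVAL_NLINE and *_BRDCAST_* states above all implement the same two-flit DSPIN send: a flit is only consumed when p_dspin_m2p.read is asserted, and the request flag is cleared once the second (nline) flit has been accepted. A minimal standalone sketch of that handshake, with simplified, assumed names (not the component's actual code):

// Minimal sketch (assumed, simplified names; not the controller's code) of the
// two-flit inval handshake used by the *_INVAL_HEADER / *_INVAL_NLINE states:
// a flit is only consumed when the network asserts 'read', and the request
// flag is cleared after the second (nline) flit has been accepted.
#include <cstdint>
#include <cstdio>
#include <initializer_list>

struct TwoFlitInvalSender
{
    enum State { HEADER, NLINE };
    State    state = HEADER;
    bool     req   = false;   // models r_*_to_cc_send_req
    uint64_t nline = 0;       // models r_*_to_cc_send_nline

    // one call per cycle; 'network_read' models p_dspin_m2p.read
    void clock(bool network_read)
    {
        if (not req or not network_read) return;  // nothing to send, or back-pressure
        if (state == HEADER)
        {
            state = NLINE;                        // header flit accepted
        }
        else
        {
            state = HEADER;                       // nline flit accepted
            req   = false;                        // transaction completed
        }
    }
};

int main()
{
    TwoFlitInvalSender s;
    s.req   = true;
    s.nline = 0x42;
    for (bool rd : {false, true, true}) s.clock(rd);  // stall, header, nline
    std::printf("req = %d\n", s.req);                 // prints 0: inval sent
    return 0;
}

Under back-pressure (read deasserted) the FSM simply re-presents the same flit on the next cycle, which is why every send state in the hunks above begins with "if (not p_dspin_m2p.read) break;".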
//////////////////////////////////// 7950 case CC_SEND_READ_NCC_INVAL_HEADER: 7951 { 7952 if (not p_dspin_m2p.read) break; 7953 r_cc_send_fsm = CC_SEND_READ_NCC_INVAL_NLINE; 7954 break; 7955 } 7956 //////////////////////////////////// 7957 case CC_SEND_READ_NCC_INVAL_NLINE: 7958 { 7959 if (not p_dspin_m2p.read) break; 7960 r_read_to_cc_send_req = false; 7961 r_cc_send_fsm = CC_SEND_READ_IDLE; 7962 7963 #if DEBUG_MEMC_CC_SEND 7964 if (m_debug) 7965 { 7966 std::cout 7967 << " <MEMC " << name() 7968 << " CC_SEND_READ_NCC_INVAL_HEADER> Inval for line " 7969 << std::hex <<r_read_to_cc_send_nline.read() << std::dec 7970 << std::endl; 7971 } 7972 #endif 7973 break; 7974 } 7975 //////////////////////////////////// 7976 case CC_SEND_WRITE_NCC_INVAL_HEADER: 7977 { 7978 if (not p_dspin_m2p.read) break; 7979 r_cc_send_fsm = CC_SEND_WRITE_NCC_INVAL_NLINE; 7980 break; 7981 } 7982 //////////////////////////////////// 7983 case CC_SEND_WRITE_NCC_INVAL_NLINE: 7984 { 7985 if (not p_dspin_m2p.read) break; 7986 r_write_to_cc_send_req = false; 7987 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 7988 7989 #if DEBUG_MEMC_CC_SEND 7990 if (m_debug) 7991 { 7992 std::cout 7993 << " <MEMC " << name() 7994 << " CC_SEND_WRITE_NCC_INVAL_HEADER> Inval for line " 7995 << std::hex << r_write_to_cc_send_nline.read() << std::dec 7996 << std::endl; 7997 } 7998 #endif 7999 break; 8000 } 8001 ////////////////////////////////// 8002 case CC_SEND_WRITE_BRDCAST_HEADER: // send first flit broadcast-inval (from WRITE FSM) 8003 { 8004 if (not p_dspin_m2p.read) break; 8005 r_cc_send_fsm = CC_SEND_WRITE_BRDCAST_NLINE; 8006 break; 8007 } 8008 ///////////////////////////////// 8009 case CC_SEND_WRITE_BRDCAST_NLINE: // send second flit broadcast-inval (from WRITE FSM) 8010 { 8011 if (not p_dspin_m2p.read) break; 8012 8013 // <Activity Counters> 8014 m_cpt_binval++; 8015 // </Activity Counters> 8016 8017 r_write_to_cc_send_brdcast_req = false; 8018 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 8019 8020 #if DEBUG_MEMC_CC_SEND 8021 if (m_debug) 8022 { 8023 std::cout << " <MEMC " << name() 8024 << " CC_SEND_WRITE_BRDCAST_NLINE> BC-Inval for line " 8025 << std::hex << r_write_to_cc_send_nline.read() << std::endl; 8026 } 8027 #endif 8028 break; 8029 } 8030 /////////////////////////////// 8031 case CC_SEND_WRITE_UPDT_HEADER: // send first flit for a multi-update (from WRITE FSM) 8032 { 8033 if (m_write_to_cc_send_inst_fifo.rok()) 8034 { 8035 if (not p_dspin_m2p.read) break; 8036 // <Activity Counters> 8037 if (is_local_req(m_write_to_cc_send_srcid_fifo.read())) 8038 { 8039 m_cpt_update_local++; 8040 } 8041 else 8042 { 8043 m_cpt_update_remote++; 8044 } 8045 // 2 flits for multi update 8046 m_cpt_update_cost += 2 * req_distance(m_write_to_cc_send_srcid_fifo.read()); 8047 // </Activity Counters> 8048 8049 r_cc_send_fsm = CC_SEND_WRITE_UPDT_NLINE; 8050 break; 8051 } 8052 8053 if (r_write_to_cc_send_multi_req.read()) 8054 { 8055 r_write_to_cc_send_multi_req = false; 8056 } 8057 8058 // <Activity Counters> 8059 m_cpt_update++; 8060 // </Activity Counters> 8061 r_cc_send_fsm = CC_SEND_WRITE_IDLE; 8062 break; 8063 } 8064 ////////////////////////////// 8065 case CC_SEND_WRITE_UPDT_NLINE: // send second flit for a multi-update (from WRITE FSM) 8066 { 8067 if (not p_dspin_m2p.read) break; 8068 8069 r_cc_send_cpt = 0; 8070 r_cc_send_fsm = CC_SEND_WRITE_UPDT_DATA; 8071 8072 #if DEBUG_MEMC_CC_SEND 8073 if (m_debug) 8074 { 8075 std::cout << " <MEMC " << name() 8076 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for address " << std::hex 8077 << r_write_to_cc_send_nline.read() * 
m_words * 4 << std::dec << std::endl; 8078 } 8079 #endif 8080 break; 8081 } 8082 ///////////////////////////// 8083 case CC_SEND_WRITE_UPDT_DATA: // send data flits for multi-update (from WRITE FSM) 8084 { 8085 if (not p_dspin_m2p.read) break; 8086 if (r_cc_send_cpt.read() == r_write_to_cc_send_count.read()) 8087 { 8088 write_to_cc_send_fifo_get = true; 8089 r_cc_send_fsm = CC_SEND_WRITE_UPDT_HEADER; 8090 break; 8091 } 8092 8093 r_cc_send_cpt = r_cc_send_cpt.read() + 1; 8094 break; 8095 } 8096 //////////////////////////////// 8097 case CC_SEND_CAS_BRDCAST_HEADER: // send first flit broadcast-inval (from CAS FSM) 8098 { 8099 if (not p_dspin_m2p.read) break; 8100 r_cc_send_fsm = CC_SEND_CAS_BRDCAST_NLINE; 8101 break; 8102 } 8103 /////////////////////////////// 8104 case CC_SEND_CAS_BRDCAST_NLINE: // send second flit broadcast-inval (from CAS FSM) 8105 { 8106 if (not p_dspin_m2p.read) break; 8107 // <Activity Counters> 8108 m_cpt_binval++; 8109 // </Activity Counters> 8110 8111 r_cas_to_cc_send_brdcast_req = false; 8112 r_cc_send_fsm = CC_SEND_CAS_IDLE; 8113 8114 #if DEBUG_MEMC_CC_SEND 8115 if (m_debug) 8116 { 8117 std::cout << " <MEMC " << name() 8118 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for address: " << std::hex 8119 << r_cas_to_cc_send_nline.read() * m_words * 4 << std::dec << std::endl; 8120 } 8121 #endif 8122 break; 8123 } 8124 ///////////////////////////// 8125 case CC_SEND_CAS_UPDT_HEADER: // send first flit for a multi-update (from CAS FSM) 8126 { 8127 if (m_cas_to_cc_send_inst_fifo.rok()) 8128 { 8129 if (not p_dspin_m2p.read) break; 8130 // <Activity Counters> 8131 if (is_local_req(m_cas_to_cc_send_srcid_fifo.read())) 8132 { 8133 m_cpt_update_local++; 8134 } 8135 else 8136 { 8137 m_cpt_update_remote++; 8138 } 8139 // 2 flits for multi update 8140 m_cpt_update_cost += 2 * req_distance(m_cas_to_cc_send_srcid_fifo.read()); 8141 // </Activity Counters> 8142 r_cc_send_fsm = CC_SEND_CAS_UPDT_NLINE; 8143 break; 8144 } 8145 8146 // no more packets to send for the multi-update 8147 if (r_cas_to_cc_send_multi_req.read()) 8148 { 8149 r_cas_to_cc_send_multi_req = false; 8150 } 8151 8152 // <Activity Counters> 8153 m_cpt_update++; 8154 // </Activity Counters> 8155 r_cc_send_fsm = CC_SEND_CAS_IDLE; 8156 break; 8157 } 8158 //////////////////////////// 8159 case CC_SEND_CAS_UPDT_NLINE: // send second flit for a multi-update (from CAS FSM) 8160 { 8161 if (not p_dspin_m2p.read) break; 8162 r_cc_send_cpt = 0; 8163 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA; 8164 8165 #if DEBUG_MEMC_CC_SEND 8166 if (m_debug) 8167 { 8168 std::cout << " <MEMC " << name() 8169 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for address " << std::hex 8170 << r_cas_to_cc_send_nline.read() * m_words * 4 << std::dec << std::endl; 8171 } 8172 #endif 8173 break; 8174 } 8175 /////////////////////////// 8176 case CC_SEND_CAS_UPDT_DATA: // send first data for a multi-update (from CAS FSM) 8177 { 8178 if (not p_dspin_m2p.read) break; 8179 8180 if (r_cas_to_cc_send_is_long.read()) 8181 { 8182 r_cc_send_fsm = CC_SEND_CAS_UPDT_DATA_HIGH; 8183 break; 8184 } 8185 8186 cas_to_cc_send_fifo_get = true; 8187 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 8188 break; 8189 } 8190 //////////////////////////////// 8191 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for multi-update (from CAS FSM) 8192 { 8193 if (not p_dspin_m2p.read) break; 8194 cas_to_cc_send_fifo_get = true; 8195 r_cc_send_fsm = CC_SEND_CAS_UPDT_HEADER; 8196 break; 8197 } 8094 8198 } 8095 8199 // end switch r_cc_send_fsm … … 8102 8206 
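[Editor's note] The *_IDLE states of the CC_SEND FSM above implement a rotating priority between the CONFIG, XRAM_RSP, CAS, READ and WRITE client FSMs: the client that was just served becomes the lowest-priority one and the scan restarts from its successor (CONFIG -> XRAM_RSP -> CAS -> READ -> WRITE -> CONFIG). A compact sketch of that policy, using assumed names rather than the controller's registers:

// Minimal sketch (assumed names; not the controller's code) of the rotating
// priority implemented by the CC_SEND_*_IDLE states: the requester just
// served becomes lowest priority, and the scan restarts from its successor.
#include <array>
#include <cstdio>

enum Client { CONFIG, XRAM_RSP, CAS, READ, WRITE, NB_CLIENTS };

// returns the first pending client, scanning from 'last_served + 1'
int select_client(int last_served, const std::array<bool, NB_CLIENTS>& pending)
{
    for (int i = 1; i <= NB_CLIENTS; i++)
    {
        int c = (last_served + i) % NB_CLIENTS;
        if (pending[c]) return c;
    }
    return -1;   // no request pending
}

int main()
{
    std::array<bool, NB_CLIENTS> pending = {true, false, true, false, false};
    // after serving CAS, the READ, WRITE, CONFIG and XRAM_RSP requesters are
    // scanned before CAS itself
    std::printf("next = %d\n", select_client(CAS, pending));   // prints 0 (CONFIG)
    return 0;
}

Because each client drops to the lowest priority right after being served, no single client FSM can starve the others.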
////////////////////////////////////////////////////////////////////////////// 8103 8207 8104 switch (r_cc_receive_fsm.read())8208 switch (r_cc_receive_fsm.read()) 8105 8209 { 8106 8210 ///////////////////// 8107 8211 case CC_RECEIVE_IDLE: 8108 { 8109 if(not p_dspin_p2m.write) break; 8110 8111 uint8_t type = 8112 DspinRwtParam::dspin_get( 8113 p_dspin_p2m.data.read(), 8114 DspinRwtParam::P2M_TYPE); 8115 8116 if((type == DspinRwtParam::TYPE_CLEANUP_DATA) or 8117 (type == DspinRwtParam::TYPE_CLEANUP_INST)) 8118 { 8119 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 8120 break; 8121 } 8122 8123 if(type == DspinRwtParam::TYPE_MULTI_ACK) 8124 { 8125 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 8126 break; 8127 } 8128 8129 assert(false and 8130 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 8131 "Illegal type in coherence request"); 8132 8212 { 8213 if (not p_dspin_p2m.write) break; 8214 8215 uint8_t type = 8216 DspinRwtParam::dspin_get( 8217 p_dspin_p2m.data.read(), 8218 DspinRwtParam::P2M_TYPE); 8219 8220 if ((type == DspinRwtParam::TYPE_CLEANUP_DATA) or 8221 (type == DspinRwtParam::TYPE_CLEANUP_INST)) 8222 { 8223 r_cc_receive_fsm = CC_RECEIVE_CLEANUP; 8133 8224 break; 8134 8225 } 8135 //////////////////////// 8226 8227 if (type == DspinRwtParam::TYPE_MULTI_ACK) 8228 { 8229 r_cc_receive_fsm = CC_RECEIVE_MULTI_ACK; 8230 break; 8231 } 8232 8233 assert(false and 8234 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 8235 "Illegal type in coherence request"); 8236 8237 break; 8238 } 8239 //////////////////////// 8136 8240 case CC_RECEIVE_CLEANUP: 8137 { 8138 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 8139 8140 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 8141 break; 8142 8143 cc_receive_to_cleanup_fifo_put = true; 8144 if(p_dspin_p2m.eop.read()) 8145 r_cc_receive_fsm = CC_RECEIVE_IDLE; 8146 8241 { 8242 // write first CLEANUP flit in CC_RECEIVE to CLEANUP fifo 8243 8244 if (not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 8147 8245 break; 8148 } 8149 //////////////////////////// 8150 case CC_RECEIVE_CLEANUP_EOP: 8151 { 8152 // write second CLEANUP flit in CC_RECEIVE to CLEANUP fifo 8153 8154 if(not p_dspin_p2m.write or not m_cc_receive_to_cleanup_fifo.wok()) 8155 break; 8156 8157 assert(p_dspin_p2m.eop.read() and 8158 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 8159 "CLEANUP command must have two flits"); 8160 8161 cc_receive_to_cleanup_fifo_put = true; 8162 if(p_dspin_p2m.eop.read()) 8163 r_cc_receive_fsm = CC_RECEIVE_IDLE; 8246 8247 cc_receive_to_cleanup_fifo_put = true; 8248 if (p_dspin_p2m.eop.read()) 8249 r_cc_receive_fsm = CC_RECEIVE_IDLE; 8250 8251 break; 8252 } 8253 ////////////////////////// 8254 case CC_RECEIVE_MULTI_ACK: 8255 { 8256 // write MULTI_ACK flit in CC_RECEIVE to MULTI_ACK fifo 8257 8258 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 8259 if (not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 8164 8260 break; 8165 } 8166 8167 ////////////////////////// 8168 case CC_RECEIVE_MULTI_ACK: 8169 { 8170 // write MULTI_ACK flit in CC_RECEIVE to MULTI_ACK fifo 8171 8172 // wait for a WOK in the CC_RECEIVE to MULTI_ACK fifo 8173 if(not p_dspin_p2m.write or not m_cc_receive_to_multi_ack_fifo.wok()) 8174 break; 8175 8176 assert(p_dspin_p2m.eop.read() and 8177 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 8178 "MULTI_ACK command must have one flit"); 8179 8180 cc_receive_to_multi_ack_fifo_put = true; 8181 r_cc_receive_fsm = CC_RECEIVE_IDLE; 8182 break; 8183 } 8261 8262 assert(p_dspin_p2m.eop.read() and 8263 "VCI_MEM_CACHE ERROR in CC_RECEIVE : " 8264 
"MULTI_ACK command must have one flit"); 8265 8266 cc_receive_to_multi_ack_fifo_put = true; 8267 r_cc_receive_fsm = CC_RECEIVE_IDLE; 8268 break; 8269 } 8184 8270 } 8271 8185 8272 ////////////////////////////////////////////////////////////////////////// 8186 8273 // TGT_RSP FSM … … 8201 8288 ////////////////////////////////////////////////////////////////////////// 8202 8289 8203 switch (r_tgt_rsp_fsm.read())8290 switch (r_tgt_rsp_fsm.read()) 8204 8291 { 8205 8292 ///////////////////////// 8206 8293 case TGT_RSP_CONFIG_IDLE: // tgt_cmd requests have the highest priority 8207 8208 if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8209 else if(r_read_to_tgt_rsp_req)8210 8211 8212 8213 8214 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8215 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8216 else if(r_xram_rsp_to_tgt_rsp_req)8217 8218 8219 8220 8221 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8222 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP;8223 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8224 8225 8226 8294 { 8295 if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8296 else if (r_read_to_tgt_rsp_req) 8297 { 8298 r_tgt_rsp_fsm = TGT_RSP_READ; 8299 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8300 } 8301 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8302 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8303 else if (r_xram_rsp_to_tgt_rsp_req) 8304 { 8305 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8306 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8307 } 8308 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8309 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8310 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8311 break; 8312 } 8313 ////////////////////////// 8227 8314 case TGT_RSP_TGT_CMD_IDLE: // read requests have the highest priority 8228 8229 if(r_read_to_tgt_rsp_req)8230 8231 8232 8233 8234 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8235 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8236 else if(r_xram_rsp_to_tgt_rsp_req)8237 8238 8239 8240 8241 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8242 else if(r_cleanup_to_tgt_rsp_req)8243 8244 8245 8246 8247 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8248 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8249 8250 8251 8315 { 8316 if (r_read_to_tgt_rsp_req) 8317 { 8318 r_tgt_rsp_fsm = TGT_RSP_READ; 8319 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8320 } 8321 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8322 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8323 else if (r_xram_rsp_to_tgt_rsp_req) 8324 { 8325 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8326 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8327 } 8328 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8329 else if (r_cleanup_to_tgt_rsp_req) 8330 { 8331 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8332 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8333 } 8334 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8335 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8336 break; 8337 } 8338 /////////////////////// 8252 8339 case TGT_RSP_READ_IDLE: // write requests have the highest priority 8253 8254 if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8255 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8256 
else if(r_xram_rsp_to_tgt_rsp_req)8257 8258 8259 8260 8261 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8262 else if(r_cleanup_to_tgt_rsp_req)8263 8264 8265 8266 8267 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8268 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8269 else if(r_read_to_tgt_rsp_req)8270 8271 8272 8273 8274 8275 8276 8340 { 8341 if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8342 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8343 else if (r_xram_rsp_to_tgt_rsp_req) 8344 { 8345 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8346 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8347 } 8348 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8349 else if (r_cleanup_to_tgt_rsp_req) 8350 { 8351 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8352 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8353 } 8354 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8355 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8356 else if (r_read_to_tgt_rsp_req) 8357 { 8358 r_tgt_rsp_fsm = TGT_RSP_READ; 8359 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8360 } 8361 break; 8362 } 8363 //////////////////////// 8277 8364 case TGT_RSP_WRITE_IDLE: // cas requests have the highest priority 8278 8279 if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8280 else if(r_xram_rsp_to_tgt_rsp_req)8281 8282 8283 8284 8285 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8286 else if(r_cleanup_to_tgt_rsp_req)8287 8288 8289 8290 8291 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8292 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8293 else if(r_read_to_tgt_rsp_req)8294 8295 8296 8297 8298 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8299 8300 8301 8365 { 8366 if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8367 else if (r_xram_rsp_to_tgt_rsp_req) 8368 { 8369 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8370 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8371 } 8372 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8373 else if (r_cleanup_to_tgt_rsp_req) 8374 { 8375 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8376 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8377 } 8378 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8379 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8380 else if (r_read_to_tgt_rsp_req) 8381 { 8382 r_tgt_rsp_fsm = TGT_RSP_READ; 8383 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8384 } 8385 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8386 break; 8387 } 8388 /////////////////////// 8302 8389 case TGT_RSP_CAS_IDLE: // xram_rsp requests have the highest priority 8303 8304 if(r_xram_rsp_to_tgt_rsp_req)8305 8306 8307 8308 8309 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8310 else if(r_cleanup_to_tgt_rsp_req)8311 8312 8313 8314 8315 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8316 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8317 else if(r_read_to_tgt_rsp_req)8318 8319 8320 8321 8322 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8323 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8324 8325 8326 8390 { 8391 if (r_xram_rsp_to_tgt_rsp_req) 8392 { 8393 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8394 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8395 } 8396 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8397 else if 
(r_cleanup_to_tgt_rsp_req) 8398 { 8399 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8400 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8401 } 8402 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8403 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8404 else if (r_read_to_tgt_rsp_req) 8405 { 8406 r_tgt_rsp_fsm = TGT_RSP_READ; 8407 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8408 } 8409 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8410 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8411 break; 8412 } 8413 /////////////////////// 8327 8414 case TGT_RSP_XRAM_IDLE: // multi ack requests have the highest priority 8328 8329 8330 if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8331 else if(r_cleanup_to_tgt_rsp_req)8332 8333 8334 8335 8336 else if(r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP;8337 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8338 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8339 else if(r_read_to_tgt_rsp_req)8340 8341 8342 8343 8344 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8345 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8346 else if(r_xram_rsp_to_tgt_rsp_req)8347 8348 8349 8350 8351 8352 8353 8415 { 8416 8417 if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8418 else if (r_cleanup_to_tgt_rsp_req) 8419 { 8420 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8421 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8422 } 8423 else if (r_cleanup_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8424 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8425 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8426 else if (r_read_to_tgt_rsp_req) 8427 { 8428 r_tgt_rsp_fsm = TGT_RSP_READ; 8429 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8430 } 8431 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8432 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8433 else if (r_xram_rsp_to_tgt_rsp_req) 8434 { 8435 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8436 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8437 } 8438 break; 8439 } 8440 //////////////////////////// 8354 8441 case TGT_RSP_MULTI_ACK_IDLE: // cleanup requests have the highest priority 8355 8356 if(r_cleanup_to_tgt_rsp_req)8357 8358 8359 8360 8361 else if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8362 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8363 else if(r_read_to_tgt_rsp_req)8364 8365 8366 8367 8368 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8369 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8370 else if(r_xram_rsp_to_tgt_rsp_req)8371 8372 8373 8374 8375 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8376 8377 8378 8442 { 8443 if (r_cleanup_to_tgt_rsp_req) 8444 { 8445 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8446 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8447 } 8448 else if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8449 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8450 else if (r_read_to_tgt_rsp_req) 8451 { 8452 r_tgt_rsp_fsm = TGT_RSP_READ; 8453 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8454 } 8455 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8456 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8457 else if (r_xram_rsp_to_tgt_rsp_req) 8458 { 8459 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8460 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8461 } 
8462 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8463 break; 8464 } 8465 ////////////////////////// 8379 8466 case TGT_RSP_CLEANUP_IDLE: // tgt cmd requests have the highest priority 8380 8381 if(r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG;8382 else if(r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD;8383 else if(r_read_to_tgt_rsp_req)8384 8385 8386 8387 8388 else if(r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE;8389 else if(r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS;8390 else if(r_xram_rsp_to_tgt_rsp_req)8391 8392 8393 8394 8395 else if(r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK;8396 else if(r_cleanup_to_tgt_rsp_req)8397 8398 8399 8400 8401 8402 8403 8467 { 8468 if (r_config_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CONFIG; 8469 else if (r_tgt_cmd_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_TGT_CMD; 8470 else if (r_read_to_tgt_rsp_req) 8471 { 8472 r_tgt_rsp_fsm = TGT_RSP_READ; 8473 r_tgt_rsp_cpt = r_read_to_tgt_rsp_word.read(); 8474 } 8475 else if (r_write_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_WRITE; 8476 else if (r_cas_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_CAS; 8477 else if (r_xram_rsp_to_tgt_rsp_req) 8478 { 8479 r_tgt_rsp_fsm = TGT_RSP_XRAM; 8480 r_tgt_rsp_cpt = r_xram_rsp_to_tgt_rsp_word.read(); 8481 } 8482 else if (r_multi_ack_to_tgt_rsp_req) r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK; 8483 else if (r_cleanup_to_tgt_rsp_req) 8484 { 8485 r_tgt_rsp_fsm = TGT_RSP_CLEANUP; 8486 r_tgt_rsp_cpt = r_cleanup_to_tgt_rsp_first_word.read(); 8487 } 8488 break; 8489 } 8490 //////////////////// 8404 8491 case TGT_RSP_CONFIG: // send the response for a config transaction 8405 8406 if ( p_vci_tgt.rspack)8407 8408 8409 8492 { 8493 if (p_vci_tgt.rspack) 8494 { 8495 r_config_to_tgt_rsp_req = false; 8496 r_tgt_rsp_fsm = TGT_RSP_CONFIG_IDLE; 8410 8497 8411 8498 #if DEBUG_MEMC_TGT_RSP 8412 if( m_debug ) 8499 if (m_debug) 8500 { 8501 std::cout 8502 << " <MEMC " << name() 8503 << " TGT_RSP_CONFIG> Config transaction completed response" 8504 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 8505 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 8506 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 8507 << std::endl; 8508 } 8509 #endif 8510 } 8511 break; 8512 } 8513 ///////////////////// 8514 case TGT_RSP_TGT_CMD: // send the response for a configuration access 8515 { 8516 if (p_vci_tgt.rspack) 8517 { 8518 r_tgt_cmd_to_tgt_rsp_req = false; 8519 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 8520 8521 #if DEBUG_MEMC_TGT_RSP 8522 if (m_debug) 8523 { 8524 std::cout 8525 << " <MEMC " << name() 8526 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 8527 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 8528 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 8529 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 8530 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 8531 << std::endl; 8532 } 8533 #endif 8534 } 8535 break; 8536 } 8537 ////////////////// 8538 case TGT_RSP_READ: // send the response to a read 8539 { 8540 if (p_vci_tgt.rspack) 8541 { 8542 8543 #if DEBUG_MEMC_TGT_RSP 8544 if (m_debug) 8545 { 8546 std::cout 8547 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 8548 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 8549 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 8550 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 8551 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 8552 << " / cpt = " << std::dec << 
r_tgt_rsp_cpt.read() << std::endl; 8553 } 8554 #endif 8555 8556 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 8557 r_read_to_tgt_rsp_length.read() - 1; 8558 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8559 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8560 8561 if ((is_last_word and not is_ll) or 8562 (r_tgt_rsp_key_sent.read() and is_ll)) 8563 { 8564 // Last word in case of READ or second flit in case if LL 8565 r_tgt_rsp_key_sent = false; 8566 r_read_to_tgt_rsp_req = false; 8567 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 8568 } 8569 else 8570 { 8571 if (is_ll) 8413 8572 { 8414 std::cout 8415 << " <MEMC " << name() 8416 << " TGT_RSP_CONFIG> Config transaction completed response" 8417 << " / rsrcid = " << std::hex << r_config_to_tgt_rsp_srcid.read() 8418 << " / rtrdid = " << r_config_to_tgt_rsp_trdid.read() 8419 << " / rpktid = " << r_config_to_tgt_rsp_pktid.read() 8420 << std::endl; 8421 } 8422 #endif 8423 } 8424 break; 8425 } 8426 ///////////////////// 8427 case TGT_RSP_TGT_CMD: // send the response for a configuration access 8428 { 8429 if ( p_vci_tgt.rspack ) 8430 { 8431 r_tgt_cmd_to_tgt_rsp_req = false; 8432 r_tgt_rsp_fsm = TGT_RSP_TGT_CMD_IDLE; 8433 8434 #if DEBUG_MEMC_TGT_RSP 8435 if( m_debug ) 8436 { 8437 std::cout 8438 << " <MEMC " << name() 8439 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 8440 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 8441 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 8442 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 8443 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 8444 << std::endl; 8445 } 8446 #endif 8447 } 8448 break; 8449 } 8450 ////////////////// 8451 case TGT_RSP_READ: // send the response to a read 8452 { 8453 if ( p_vci_tgt.rspack ) 8454 { 8455 8456 #if DEBUG_MEMC_TGT_RSP 8457 if( m_debug ) 8458 { 8459 std::cout 8460 << " <MEMC " << name() << " TGT_RSP_READ> Read response" 8461 << " / rsrcid = " << std::hex << r_read_to_tgt_rsp_srcid.read() 8462 << " / rtrdid = " << r_read_to_tgt_rsp_trdid.read() 8463 << " / rpktid = " << r_read_to_tgt_rsp_pktid.read() 8464 << " / rdata = " << r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 8465 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 8466 } 8467 #endif 8468 8469 8470 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + 8471 r_read_to_tgt_rsp_length.read() - 1; 8472 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8473 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8474 8475 if ((is_last_word and not is_ll) or 8476 (r_tgt_rsp_key_sent.read() and is_ll)) 8477 { 8478 // Last word in case of READ or second flit in case if LL 8479 r_tgt_rsp_key_sent = false; 8480 r_read_to_tgt_rsp_req = false; 8481 r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; 8482 if (r_read_to_tgt_rsp_pktid.read() == 0x0) 8483 { 8484 m_cpt_read_data_unc ++; 8485 } 8486 else if (r_read_to_tgt_rsp_pktid.read() == 0x1) 8487 { 8488 m_cpt_read_data_miss_CC ++; 8489 } 8490 else if (r_read_to_tgt_rsp_pktid.read() == 0x2) 8491 { 8492 m_cpt_read_ins_unc ++; 8493 } 8494 else if (r_read_to_tgt_rsp_pktid.read() == 0x3) 8495 { 8496 m_cpt_read_ins_miss ++; 8497 } 8498 else if (r_read_to_tgt_rsp_pktid.read() == 0x6) 8499 { 8500 m_cpt_read_ll_CC ++; 8501 } 8502 else if (r_read_to_tgt_rsp_pktid.read() == 0x9) 8503 { 8504 m_cpt_read_data_miss_NCC ++; 8505 } 8506 else if (r_read_to_tgt_rsp_pktid.read() == 0x14) 8507 { 8508 m_cpt_read_ll_NCC ++; 8509 } 8510 else 8511 { 8512 assert(false); 8513 } 8573 
r_tgt_rsp_key_sent = true; // Send second flit of ll 8514 8574 } 8515 8575 else 8516 8576 { 8517 if (is_ll) 8518 { 8519 r_tgt_rsp_key_sent = true; // Send second flit of ll 8520 } 8521 else 8522 { 8523 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 8524 } 8577 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 8525 8578 } 8526 8579 } 8527 break; 8528 } 8529 ////////////////// 8580 } 8581 break; 8582 } 8583 ////////////////// 8530 8584 case TGT_RSP_WRITE: // send the write acknowledge 8531 8532 if(p_vci_tgt.rspack)8533 8585 { 8586 if (p_vci_tgt.rspack) 8587 { 8534 8588 8535 8589 #if DEBUG_MEMC_TGT_RSP 8536 if(m_debug) 8537 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 8538 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 8539 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 8540 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl; 8541 #endif 8542 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 8543 r_write_to_tgt_rsp_req = false; 8544 } 8545 break; 8546 } 8547 ///////////////////// 8590 if (m_debug) 8591 { 8592 std::cout << " <MEMC " << name() << " TGT_RSP_WRITE> Write response" 8593 << " / rsrcid = " << std::hex << r_write_to_tgt_rsp_srcid.read() 8594 << " / rtrdid = " << r_write_to_tgt_rsp_trdid.read() 8595 << " / rpktid = " << r_write_to_tgt_rsp_pktid.read() << std::endl; 8596 } 8597 #endif 8598 r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; 8599 r_write_to_tgt_rsp_req = false; 8600 } 8601 break; 8602 } 8603 ///////////////////// 8548 8604 case TGT_RSP_CLEANUP: 8549 8550 if(p_vci_tgt.rspack)8551 8605 { 8606 if (p_vci_tgt.rspack) 8607 { 8552 8608 8553 8609 #if DEBUG_MEMC_TGT_RSP 8554 if(m_debug) 8610 if (m_debug) 8611 { 8612 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 8613 << " / rsrcid = " << std::hex << r_cleanup_to_tgt_rsp_srcid.read() 8614 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 8615 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl 8616 << " / data = " << r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() << std::dec << std::endl; 8617 } 8618 #endif 8619 8620 uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length.read() - 1; 8621 bool is_ll = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8622 8623 if (r_cleanup_to_tgt_rsp_type.read() or ((r_tgt_rsp_cpt.read() == last_word_idx) and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll)) 8624 { 8625 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 8626 r_cleanup_to_tgt_rsp_req = false; 8627 r_tgt_rsp_key_sent = false; 8628 } 8629 else 8630 { 8631 if (is_ll) 8555 8632 { 8556 std::cout << " <MEMC " << name() << " TGT_RSP_CLEANUP> Cleanup response" 8557 << " / rsrcid = " << std::dec << r_cleanup_to_tgt_rsp_srcid.read() 8558 << " / rtrdid = " << r_cleanup_to_tgt_rsp_trdid.read() 8559 << " / rpktid = " << r_cleanup_to_tgt_rsp_pktid.read() << std::endl 8560 << " / data = " << std::hex << r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() << std::dec << std::endl; 8561 } 8562 #endif 8563 8564 uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length.read() - 1; 8565 bool is_ll = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8566 8567 if (r_cleanup_to_tgt_rsp_type.read() or ((r_tgt_rsp_cpt.read() == last_word_idx) and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll) ) 8568 { 8569 r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; 8570 r_cleanup_to_tgt_rsp_req = false; 8571 r_tgt_rsp_key_sent = false; 8572 8573 8574 if 
(r_cleanup_to_tgt_rsp_pktid.read() == 0x0) 8575 { 8576 m_cpt_read_data_unc ++; 8577 } 8578 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x1) 8579 { 8580 m_cpt_read_data_miss_CC ++; 8581 } 8582 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x2) 8583 { 8584 m_cpt_read_ins_unc ++; 8585 } 8586 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x3) 8587 { 8588 m_cpt_read_ins_miss ++; 8589 } 8590 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x6) 8591 { 8592 m_cpt_read_ll_CC ++; 8593 } 8594 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x9) 8595 { 8596 m_cpt_read_data_miss_NCC ++; 8597 } 8598 else if (r_cleanup_to_tgt_rsp_pktid.read() == 0x14) 8599 { 8600 m_cpt_read_ll_NCC ++; 8601 } 8602 else if (!r_cleanup_to_tgt_rsp_type.read()) 8603 { 8604 assert(false); 8605 } 8606 8633 r_tgt_rsp_key_sent = true; 8607 8634 } 8608 8635 else 8609 8636 { 8610 if (is_ll) 8611 { 8612 r_tgt_rsp_key_sent = true; 8613 } 8614 else 8615 { 8616 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; 8617 } 8637 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; 8618 8638 } 8619 8639 } 8620 break; 8621 } 8622 ///////////////// 8640 } 8641 break; 8642 } 8643 ///////////////// 8623 8644 case TGT_RSP_CAS: // send one atomic word response 8624 8625 if(p_vci_tgt.rspack)8626 8645 { 8646 if (p_vci_tgt.rspack) 8647 { 8627 8648 8628 8649 #if DEBUG_MEMC_TGT_RSP 8629 if(m_debug) 8630 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 8631 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 8632 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read() 8633 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl; 8634 #endif 8635 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 8636 r_cas_to_tgt_rsp_req = false; 8637 } 8638 break; 8639 } 8640 ////////////////// 8650 if (m_debug) 8651 { 8652 std::cout << " <MEMC " << name() << " TGT_RSP_CAS> CAS response" 8653 << " / rsrcid = " << std::hex << r_cas_to_tgt_rsp_srcid.read() 8654 << " / rtrdid = " << r_cas_to_tgt_rsp_trdid.read() 8655 << " / rpktid = " << r_cas_to_tgt_rsp_pktid.read() << std::endl; 8656 } 8657 #endif 8658 r_tgt_rsp_fsm = TGT_RSP_CAS_IDLE; 8659 r_cas_to_tgt_rsp_req = false; 8660 } 8661 break; 8662 } 8663 ////////////////// 8641 8664 case TGT_RSP_XRAM: // send the response after XRAM access 8642 8643 if ( p_vci_tgt.rspack)8644 8665 { 8666 if (p_vci_tgt.rspack) 8667 { 8645 8668 8646 8669 #if DEBUG_MEMC_TGT_RSP 8647 if( m_debug ) 8648 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 8649 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 8650 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 8651 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 8652 << " / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 8653 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 8654 #endif 8655 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 8656 r_xram_rsp_to_tgt_rsp_length.read() - 1; 8657 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8658 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8659 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 8660 8661 if (((is_last_word or is_error) and not is_ll) or 8662 (r_tgt_rsp_key_sent.read() and is_ll)) 8670 if (m_debug) 8671 { 8672 std::cout << " <MEMC " << name() << " TGT_RSP_XRAM> Response following XRAM access" 8673 << " / rsrcid = " << std::hex << r_xram_rsp_to_tgt_rsp_srcid.read() 8674 << " / rtrdid = " << r_xram_rsp_to_tgt_rsp_trdid.read() 8675 << " / rpktid = " << r_xram_rsp_to_tgt_rsp_pktid.read() 8676 << 
" / rdata = " << r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read() 8677 << " / cpt = " << std::dec << r_tgt_rsp_cpt.read() << std::endl; 8678 } 8679 #endif 8680 uint32_t last_word_idx = r_xram_rsp_to_tgt_rsp_word.read() + 8681 r_xram_rsp_to_tgt_rsp_length.read() - 1; 8682 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 8683 bool is_ll = ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 8684 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 8685 8686 if (((is_last_word or is_error) and not is_ll) or 8687 (r_tgt_rsp_key_sent.read() and is_ll)) 8688 { 8689 // Last word sent in case of READ or second flit sent in case if LL 8690 r_tgt_rsp_key_sent = false; 8691 r_xram_rsp_to_tgt_rsp_req = false; 8692 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 8693 } 8694 else 8695 { 8696 if (is_ll) 8663 8697 { 8664 // Last word sent in case of READ or second flit sent in case if LL 8665 r_tgt_rsp_key_sent = false; 8666 r_xram_rsp_to_tgt_rsp_req = false; 8667 r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; 8668 8669 8670 if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x0) 8671 { 8672 m_cpt_read_data_unc ++; 8673 } 8674 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x1) 8675 { 8676 m_cpt_read_data_miss_CC ++; 8677 } 8678 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x2) 8679 { 8680 m_cpt_read_ins_unc ++; 8681 } 8682 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x3) 8683 { 8684 m_cpt_read_ins_miss ++; 8685 } 8686 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x6) 8687 { 8688 m_cpt_read_ll_CC ++; 8689 } 8690 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x9) 8691 { 8692 m_cpt_read_data_miss_NCC ++; 8693 } 8694 else if (r_xram_rsp_to_tgt_rsp_pktid.read() == 0x14) 8695 { 8696 m_cpt_read_ll_NCC ++; 8697 } 8698 else 8699 { 8700 assert(false); 8701 } 8702 8698 r_tgt_rsp_key_sent = true; // Send second flit of ll 8703 8699 } 8704 8700 else 8705 8701 { 8706 if (is_ll) 8707 { 8708 r_tgt_rsp_key_sent = true; // Send second flit of ll 8709 } 8710 else 8711 { 8712 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 8713 } 8702 r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; // Send next word of read 8714 8703 } 8715 8704 } 8716 break; 8717 } 8718 /////////////////////// 8705 } 8706 break; 8707 } 8708 /////////////////////// 8719 8709 case TGT_RSP_MULTI_ACK: // send the write response after coherence transaction 8720 8721 if(p_vci_tgt.rspack)8722 8710 { 8711 if (p_vci_tgt.rspack) 8712 { 8723 8713 8724 8714 #if DEBUG_MEMC_TGT_RSP 8725 if(m_debug) 8726 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 8727 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 8728 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 8729 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 8730 #endif 8731 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 8732 r_multi_ack_to_tgt_rsp_req = false; 8733 } 8734 break; 8735 } 8715 if (m_debug) 8716 { 8717 std::cout << " <MEMC " << name() << " TGT_RSP_MULTI_ACK> Write response after coherence transaction" 8718 << " / rsrcid = " << std::hex << r_multi_ack_to_tgt_rsp_srcid.read() 8719 << " / rtrdid = " << r_multi_ack_to_tgt_rsp_trdid.read() 8720 << " / rpktid = " << r_multi_ack_to_tgt_rsp_pktid.read() << std::endl; 8721 } 8722 #endif 8723 r_tgt_rsp_fsm = TGT_RSP_MULTI_ACK_IDLE; 8724 r_multi_ack_to_tgt_rsp_req = false; 8725 } 8726 break; 8727 } 8736 8728 } // end switch tgt_rsp_fsm 8737 8729 … … 8747 8739 // The resource is always allocated. 
8748 8740 ///////////////////////////////////////////////////////////////////////////////////// 8749 switch (r_alloc_upt_fsm.read())8741 switch (r_alloc_upt_fsm.read()) 8750 8742 { 8751 8743 ///////////////////////// 8752 8744 case ALLOC_UPT_WRITE: // allocated to WRITE FSM 8745 { 8753 8746 if (r_write_fsm.read() != WRITE_UPT_LOCK) 8754 8747 { … … 8758 8751 else if (r_multi_ack_fsm.read() == MULTI_ACK_UPT_LOCK) 8759 8752 r_alloc_upt_fsm = ALLOC_UPT_MULTI_ACK; 8753 8760 8754 else 8761 8755 m_cpt_upt_unused++; 8762 8756 } 8763 8757 break; 8764 8765 8758 } 8759 ///////////////////////// 8766 8760 case ALLOC_UPT_CAS: // allocated to CAS FSM 8761 { 8767 8762 if (r_cas_fsm.read() != CAS_UPT_LOCK) 8768 8763 { … … 8777 8772 } 8778 8773 break; 8779 8780 8774 } 8775 ///////////////////////// 8781 8776 case ALLOC_UPT_MULTI_ACK: // allocated to MULTI_ACK FSM 8777 { 8782 8778 if ((r_multi_ack_fsm.read() != MULTI_ACK_UPT_LOCK ) and 8783 8779 (r_multi_ack_fsm.read() != MULTI_ACK_UPT_CLEAR)) … … 8788 8784 else if (r_cas_fsm.read() == CAS_UPT_LOCK) 8789 8785 r_alloc_upt_fsm = ALLOC_UPT_CAS; 8786 8790 8787 else 8791 8788 m_cpt_upt_unused++; 8792 8789 } 8793 8790 break; 8791 } 8794 8792 } // end switch r_alloc_upt_fsm 8795 8793 … … 8809 8807 // The resource is always allocated. 8810 8808 ///////////////////////////////////////////////////////////////////////////////////// 8811 switch (r_alloc_ivt_fsm.read())8809 switch (r_alloc_ivt_fsm.read()) 8812 8810 { 8813 ///////////////////// /////8811 ///////////////////// 8814 8812 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 8813 { 8815 8814 if ((r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 8816 8815 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB) and 8817 8816 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK)) 8818 8817 { 8819 if (r_read_fsm.read() == READ_IVT_LOCK)8818 if (r_read_fsm.read() == READ_IVT_LOCK) 8820 8819 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8821 8820 … … 8837 8836 } 8838 8837 break; 8839 8840 8838 } 8839 ////////////////////////// 8841 8840 case ALLOC_IVT_READ: // allocated to READ FSM 8841 { 8842 8842 if (r_read_fsm.read() != READ_IVT_LOCK) 8843 8843 { … … 8863 8863 } 8864 8864 break; 8865 8866 8865 } 8866 ////////////////////////// 8867 8867 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 8868 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 8868 { 8869 if (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 8869 8870 { 8870 8871 if ((r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) or … … 8883 8884 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8884 8885 8885 else if (r_read_fsm.read() == READ_IVT_LOCK)8886 else if (r_read_fsm.read() == READ_IVT_LOCK) 8886 8887 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8887 8888 … … 8890 8891 } 8891 8892 break; 8892 8893 8893 } 8894 ////////////////////////// 8894 8895 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 8895 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and 8896 { 8897 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK) and 8896 8898 (r_cleanup_fsm.read() != CLEANUP_IVT_DECREMENT) and 8897 8899 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) … … 8908 8910 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8909 8911 8910 else if (r_read_fsm.read() == READ_IVT_LOCK)8912 else if (r_read_fsm.read() == READ_IVT_LOCK) 8911 8913 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8912 8914 … … 8918 8920 } 8919 8921 break; 8920 8921 8922 } 8923 ////////////////////////// 8922 8924 case ALLOC_IVT_CAS: // allocated to CAS FSM 8925 { 8923 8926 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 8924 8927 { … … 8931 8934 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8932 8935 8933 else if 
(r_read_fsm.read() == READ_IVT_LOCK)8936 else if (r_read_fsm.read() == READ_IVT_LOCK) 8934 8937 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8935 8938 … … 8945 8948 } 8946 8949 break; 8947 8948 8950 } 8951 ////////////////////////// 8949 8952 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 8953 { 8950 8954 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 8951 8955 { … … 8955 8959 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 8956 8960 8957 else if (r_read_fsm.read() == READ_IVT_LOCK)8961 else if (r_read_fsm.read() == READ_IVT_LOCK) 8958 8962 r_alloc_ivt_fsm = ALLOC_IVT_READ; 8959 8963 … … 8965 8969 r_alloc_ivt_fsm = ALLOC_IVT_CLEANUP; 8966 8970 8967 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK)8971 else if (r_cas_fsm.read() == CAS_BC_IVT_LOCK) 8968 8972 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 8969 8973 else … … 8971 8975 } 8972 8976 break; 8977 } 8973 8978 } // end switch r_alloc_ivt_fsm 8974 8979 … … 8982 8987 ///////////////////////////////////////////////////////////////////////////////////// 8983 8988 8984 switch (r_alloc_dir_fsm.read())8989 switch (r_alloc_dir_fsm.read()) 8985 8990 { 8986 8991 ///////////////////// 8987 8992 case ALLOC_DIR_RESET: // Initializes the directory one SET per cycle. 8988 // All the WAYS of a SET initialized in parallel 8993 // All the WAYS of a SET initialized in parallel 8994 { 8989 8995 8990 8996 r_alloc_dir_reset_cpt.write(r_alloc_dir_reset_cpt.read() + 1); 8991 8997 8992 if (r_alloc_dir_reset_cpt.read() == (m_sets - 1))8998 if (r_alloc_dir_reset_cpt.read() == (m_sets - 1)) 8993 8999 { 8994 9000 m_cache_directory.init(); … … 8996 9002 } 8997 9003 break; 8998 8999 9004 } 9005 ////////////////////// 9000 9006 case ALLOC_DIR_CONFIG: // allocated to CONFIG FSM 9001 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 9002 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 9003 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 9004 (r_config_fsm.read() != CONFIG_TRT_SET) and 9005 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 9006 { 9007 if(r_read_fsm.read() == READ_DIR_REQ) 9007 { 9008 if ((r_config_fsm.read() != CONFIG_DIR_REQ) and 9009 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 9010 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 9011 (r_config_fsm.read() != CONFIG_TRT_SET) and 9012 (r_config_fsm.read() != CONFIG_IVT_LOCK)) 9013 { 9014 if (r_read_fsm.read() == READ_DIR_REQ) 9008 9015 r_alloc_dir_fsm = ALLOC_DIR_READ; 9009 9016 9010 else if (r_write_fsm.read() == WRITE_DIR_REQ)9017 else if (r_write_fsm.read() == WRITE_DIR_REQ) 9011 9018 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 9012 9019 9013 else if (r_cas_fsm.read() == CAS_DIR_REQ)9020 else if (r_cas_fsm.read() == CAS_DIR_REQ) 9014 9021 r_alloc_dir_fsm = ALLOC_DIR_CAS; 9015 9022 9016 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ)9023 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 9017 9024 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 9018 9025 9019 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK)9026 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 9020 9027 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 9021 9028 } 9022 9029 break; 9023 9024 9030 } 9031 //////////////////// 9025 9032 case ALLOC_DIR_READ: // allocated to READ FSM 9026 if( ((r_read_fsm.read() != READ_DIR_REQ) and 9027 (r_read_fsm.read() != READ_DIR_LOCK) and 9028 (r_read_fsm.read() != READ_TRT_LOCK) and 9029 (r_read_fsm.read() != READ_HEAP_REQ) and 9030 (r_read_fsm.read() != READ_IVT_LOCK)) 9031 or 9032 ((r_read_fsm.read() == READ_TRT_LOCK) and 9033 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ)) ) 9034 { 9035 if(r_write_fsm.read() == WRITE_DIR_REQ) 9033 { 9034 if (((r_read_fsm.read() != 
READ_DIR_REQ) and 9035 (r_read_fsm.read() != READ_DIR_LOCK) and 9036 (r_read_fsm.read() != READ_TRT_LOCK) and 9037 (r_read_fsm.read() != READ_HEAP_REQ) and 9038 (r_read_fsm.read() != READ_IVT_LOCK)) 9039 or 9040 ((r_read_fsm.read() == READ_TRT_LOCK) and 9041 (r_alloc_trt_fsm.read() == ALLOC_TRT_READ))) 9042 { 9043 if (r_write_fsm.read() == WRITE_DIR_REQ) 9036 9044 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 9037 9045 9038 else if (r_cas_fsm.read() == CAS_DIR_REQ)9046 else if (r_cas_fsm.read() == CAS_DIR_REQ) 9039 9047 r_alloc_dir_fsm = ALLOC_DIR_CAS; 9040 9048 9041 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ)9049 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 9042 9050 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 9043 9051 9044 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK)9052 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 9045 9053 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 9046 9054 9047 else if (r_config_fsm.read() == CONFIG_DIR_REQ)9055 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 9048 9056 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 9049 9057 … … 9052 9060 } 9053 9061 break; 9054 9055 9062 } 9063 ///////////////////// 9056 9064 case ALLOC_DIR_WRITE: 9057 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 9058 (r_write_fsm.read() != WRITE_DIR_LOCK) and 9059 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 9060 (r_write_fsm.read() != WRITE_DIR_HIT) and 9061 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 9062 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 9063 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK) and 9064 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 9065 (r_write_fsm.read() != WRITE_UPT_LOCK) and 9066 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 9067 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB)) 9068 or 9069 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 9070 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 9071 or 9072 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 9073 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 9074 { 9075 if(r_cas_fsm.read() == CAS_DIR_REQ) 9065 { 9066 if (((r_write_fsm.read() != WRITE_DIR_REQ) and 9067 (r_write_fsm.read() != WRITE_DIR_LOCK) and 9068 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 9069 (r_write_fsm.read() != WRITE_DIR_HIT) and 9070 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 9071 (r_write_fsm.read() != WRITE_BC_IVT_LOCK) and 9072 (r_write_fsm.read() != WRITE_MISS_IVT_LOCK) and 9073 (r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 9074 (r_write_fsm.read() != WRITE_UPT_LOCK) and 9075 (r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 9076 (r_write_fsm.read() != WRITE_IVT_LOCK_HIT_WB)) 9077 or 9078 ((r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) and 9079 (r_alloc_heap_fsm.read() == ALLOC_HEAP_WRITE)) 9080 or 9081 ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) and 9082 (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE))) 9083 { 9084 if (r_cas_fsm.read() == CAS_DIR_REQ) 9076 9085 r_alloc_dir_fsm = ALLOC_DIR_CAS; 9077 9086 9078 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ)9087 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 9079 9088 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 9080 9089 9081 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK)9090 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 9082 9091 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 9083 9092 9084 else if (r_config_fsm.read() == CONFIG_DIR_REQ)9093 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 9085 9094 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 9086 9095 9087 else if (r_read_fsm.read() == READ_DIR_REQ)9096 else if (r_read_fsm.read() == READ_DIR_REQ) 9088 9097 r_alloc_dir_fsm = ALLOC_DIR_READ; 9089 9098 … … 9092 
9101 } 9093 9102 break; 9094 9095 9103 } 9104 /////////////////// 9096 9105 case ALLOC_DIR_CAS: // allocated to CAS FSM 9097 if(((r_cas_fsm.read() != CAS_DIR_REQ) and 9098 (r_cas_fsm.read() != CAS_DIR_LOCK) and 9099 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 9100 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 9101 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 9102 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 9103 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 9104 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 9105 (r_cas_fsm.read() != CAS_UPT_LOCK) and 9106 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 9107 or 9108 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 9109 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 9110 or 9111 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 9112 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 9113 { 9114 if(r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 9106 { 9107 if (((r_cas_fsm.read() != CAS_DIR_REQ) and 9108 (r_cas_fsm.read() != CAS_DIR_LOCK) and 9109 (r_cas_fsm.read() != CAS_DIR_HIT_READ) and 9110 (r_cas_fsm.read() != CAS_DIR_HIT_COMPARE) and 9111 (r_cas_fsm.read() != CAS_DIR_HIT_WRITE) and 9112 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 9113 (r_cas_fsm.read() != CAS_BC_IVT_LOCK) and 9114 (r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 9115 (r_cas_fsm.read() != CAS_UPT_LOCK) and 9116 (r_cas_fsm.read() != CAS_UPT_HEAP_LOCK)) 9117 or 9118 ((r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) and 9119 (r_alloc_heap_fsm.read() == ALLOC_HEAP_CAS)) 9120 or 9121 ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) and 9122 (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS))) 9123 { 9124 if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 9115 9125 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 9116 9126 9117 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK)9127 else if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 9118 9128 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 9119 9129 9120 else if (r_config_fsm.read() == CONFIG_DIR_REQ)9130 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 9121 9131 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 9122 9132 9123 else if (r_read_fsm.read() == READ_DIR_REQ)9133 else if (r_read_fsm.read() == READ_DIR_REQ) 9124 9134 r_alloc_dir_fsm = ALLOC_DIR_READ; 9125 9135 9126 else if (r_write_fsm.read() == WRITE_DIR_REQ)9136 else if (r_write_fsm.read() == WRITE_DIR_REQ) 9127 9137 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 9128 9138 … … 9131 9141 } 9132 9142 break; 9133 9134 9143 } 9144 /////////////////////// 9135 9145 case ALLOC_DIR_CLEANUP: // allocated to CLEANUP FSM 9136 if((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 9137 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 9138 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 9139 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 9140 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) 9141 { 9142 if(r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 9146 { 9147 if ((r_cleanup_fsm.read() != CLEANUP_DIR_REQ) and 9148 (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) and 9149 (r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 9150 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 9151 (r_cleanup_fsm.read() != CLEANUP_IVT_LOCK_DATA)) 9152 { 9153 if (r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) 9143 9154 r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; 9144 9155 9145 else if (r_config_fsm.read() == CONFIG_DIR_REQ)9156 else if (r_config_fsm.read() == CONFIG_DIR_REQ) 9146 9157 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 9147 9158 9148 else if (r_read_fsm.read() == READ_DIR_REQ)9159 else if (r_read_fsm.read() == READ_DIR_REQ) 9149 9160 r_alloc_dir_fsm = ALLOC_DIR_READ; 9150 9161 9151 else if (r_write_fsm.read() == WRITE_DIR_REQ)9162 else if 
(r_write_fsm.read() == WRITE_DIR_REQ) 9152 9163 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 9153 9164 9154 else if (r_cas_fsm.read() == CAS_DIR_REQ)9165 else if (r_cas_fsm.read() == CAS_DIR_REQ) 9155 9166 r_alloc_dir_fsm = ALLOC_DIR_CAS; 9156 9167 … … 9159 9170 } 9160 9171 break; 9161 9162 9172 } 9173 //////////////////////// 9163 9174 case ALLOC_DIR_XRAM_RSP: // allocated to XRAM_RSP FSM 9164 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 9165 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 9166 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 9167 { 9168 if(r_config_fsm.read() == CONFIG_DIR_REQ) 9175 { 9176 if ((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 9177 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 9178 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 9179 { 9180 if (r_config_fsm.read() == CONFIG_DIR_REQ) 9169 9181 r_alloc_dir_fsm = ALLOC_DIR_CONFIG; 9170 9182 9171 else if (r_read_fsm.read() == READ_DIR_REQ)9183 else if (r_read_fsm.read() == READ_DIR_REQ) 9172 9184 r_alloc_dir_fsm = ALLOC_DIR_READ; 9173 9185 9174 else if (r_write_fsm.read() == WRITE_DIR_REQ)9186 else if (r_write_fsm.read() == WRITE_DIR_REQ) 9175 9187 r_alloc_dir_fsm = ALLOC_DIR_WRITE; 9176 9188 9177 else if (r_cas_fsm.read() == CAS_DIR_REQ)9189 else if (r_cas_fsm.read() == CAS_DIR_REQ) 9178 9190 r_alloc_dir_fsm = ALLOC_DIR_CAS; 9179 9191 9180 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ)9192 else if (r_cleanup_fsm.read() == CLEANUP_DIR_REQ) 9181 9193 r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; 9182 9194 … … 9185 9197 } 9186 9198 break; 9187 9199 } 9188 9200 } // end switch alloc_dir_fsm 9189 9201 … … 9192 9204 //////////////////////////////////////////////////////////////////////////////////// 9193 9205 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 9194 // with a round robin priority between 4user FSMs :9195 // The cyclic priority is READ > WRITE > CAS > XRAM_RSP9206 // with a round robin priority between 8 user FSMs : 9207 // The priority is READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG > CLEANUP 9196 9208 // The ressource is always allocated. 
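Editor's aside, not part of changeset r823: every ALLOC_* arbiter in this function, including the eight ALLOC_TRT cases that follow, repeats the same rotating-priority scan, merely shifted by the identity of the current owner. The stand-alone C++ sketch below illustrates that scan under stated assumptions; the trt_user_e enum, the request bitmask and next_trt_owner() are illustrative names only, not part of the VciMemCache interface.

// Rotating-priority grant in the spirit of the ALLOC_TRT FSM below: the scan
// starts at the successor of the current owner and applies the fixed order
// READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG > CLEANUP.
#include <cstdio>

enum trt_user_e { TRT_READ, TRT_WRITE, TRT_CAS, TRT_IXR_CMD, TRT_XRAM_RSP,
                  TRT_IXR_RSP, TRT_CONFIG, TRT_CLEANUP, TRT_N_USERS };

// Bit i of request_mask is set when user i is waiting for the TRT lock
// (e.g. the READ FSM being in READ_TRT_LOCK). Returns the next owner, or the
// current one when nobody else requests (the situation counted by
// m_cpt_trt_unused in the real code).
int next_trt_owner(int owner, unsigned request_mask)
{
    for (int k = 1; k < TRT_N_USERS; k++)
    {
        int candidate = (owner + k) % TRT_N_USERS;
        if (request_mask & (1u << candidate)) return candidate;
    }
    return owner;
}

int main()
{
    // Owner is WRITE; READ and XRAM_RSP both request the lock.
    // The scan starts at CAS, so XRAM_RSP wins over READ, exactly as in the
    // ALLOC_TRT_WRITE case below, where READ is tested last.
    unsigned mask = (1u << TRT_READ) | (1u << TRT_XRAM_RSP);
    std::printf("next TRT owner = %d\n", next_trt_owner(TRT_WRITE, mask)); // prints 4 (TRT_XRAM_RSP)
    return 0;
}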
9197 9209 /////////////////////////////////////////////////////////////////////////////////// 9198 9210 9199 switch (r_alloc_trt_fsm.read())9211 switch (r_alloc_trt_fsm.read()) 9200 9212 { 9201 9213 //////////////////// 9202 9214 case ALLOC_TRT_READ: 9203 if(r_read_fsm.read() != READ_TRT_LOCK) 9204 { 9205 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9215 { 9216 if (r_read_fsm.read() != READ_TRT_LOCK) 9217 { 9218 if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9206 9219 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9207 9220 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9208 9221 9209 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or9222 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9210 9223 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9211 9224 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9212 9225 9213 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9226 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9227 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9228 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9229 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9230 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9231 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 9232 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9233 9234 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9235 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9236 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9237 9238 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9239 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9240 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9241 9242 else if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9243 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9244 9245 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 9246 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9247 9248 else 9249 m_cpt_trt_unused++; 9250 } 9251 break; 9252 } 9253 ///////////////////// 9254 case ALLOC_TRT_WRITE: 9255 { 9256 if ((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 9257 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 9258 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 9259 { 9260 if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9261 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9262 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9263 9264 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9265 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9266 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9267 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9268 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9269 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 9270 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9271 9272 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9273 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9274 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9275 9276 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9277 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9278 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9279 9280 else if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9281 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9282 9283 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 9284 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9285 9286 else if (r_read_fsm.read() == READ_TRT_LOCK) 9287 r_alloc_trt_fsm = ALLOC_TRT_READ; 9288 9289 else 9290 m_cpt_trt_unused++; 9291 } 9292 break; 9293 } 9294 //////////////////// 9295 case ALLOC_TRT_CAS: 9296 { 9297 if ((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 9298 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 9299 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 9300 { 9301 if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9214 9302 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9215 9303 
(r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or … … 9219 9307 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9220 9308 9221 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9309 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9310 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9311 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9312 9313 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9314 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9315 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9316 9317 else if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9318 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9319 9320 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 9321 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9322 9323 else if (r_read_fsm.read() == READ_TRT_LOCK) 9324 r_alloc_trt_fsm = ALLOC_TRT_READ; 9325 9326 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9327 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9328 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9329 9330 else 9331 m_cpt_trt_unused++; 9332 } 9333 break; 9334 } 9335 /////////////////////// 9336 case ALLOC_TRT_IXR_CMD: 9337 { 9338 if ((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 9339 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 9340 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 9341 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 9342 (r_ixr_cmd_fsm.read() != IXR_CMD_CLEANUP_TRT) and 9343 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 9344 { 9345 if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9222 9346 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9223 9347 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9224 9348 9225 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or9349 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9226 9350 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9227 9351 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9228 9352 9229 else if ( r_config_fsm.read() == CONFIG_TRT_LOCK)9353 else if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9230 9354 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9231 9355 … … 9233 9357 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9234 9358 9235 else 9236 m_cpt_trt_unused++; 9237 } 9238 break; 9239 9240 ///////////////////// 9241 case ALLOC_TRT_WRITE: 9242 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 9243 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 9244 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 9245 { 9246 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9359 else if (r_read_fsm.read() == READ_TRT_LOCK) 9360 r_alloc_trt_fsm = ALLOC_TRT_READ; 9361 9362 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9363 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9364 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9365 9366 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9247 9367 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9248 9368 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9249 9250 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9251 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9252 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9253 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9254 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9255 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 9256 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9257 9258 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9259 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9260 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9261 9262 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9263 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9264 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9265 9266 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 9267 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9268 9269 else if 
(r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 9270 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9271 9272 else if(r_read_fsm.read() == READ_TRT_LOCK) 9273 r_alloc_trt_fsm = ALLOC_TRT_READ; 9274 9275 else 9276 m_cpt_trt_unused++; 9277 } 9278 break; 9279 9280 //////////////////// 9281 case ALLOC_TRT_CAS: 9282 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 9283 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 9284 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 9285 { 9286 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9287 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9288 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9289 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9290 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9291 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 9292 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9293 9294 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9295 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9296 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9297 9298 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9299 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9300 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9301 9302 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 9303 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9304 9305 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 9306 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9307 9308 else if(r_read_fsm.read() == READ_TRT_LOCK) 9309 r_alloc_trt_fsm = ALLOC_TRT_READ; 9310 9311 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9312 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9313 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9314 9315 else 9316 m_cpt_trt_unused++; 9317 } 9318 break; 9319 /////////////////////// 9320 case ALLOC_TRT_IXR_CMD: 9321 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 9322 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 9323 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 9324 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 9325 (r_ixr_cmd_fsm.read() != IXR_CMD_CLEANUP_TRT) and 9326 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 9327 { 9328 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9329 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9330 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9331 9332 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9333 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9334 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9335 9336 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 9337 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9338 9339 else if (r_cleanup_fsm.read() == CLEANUP_IXR_REQ) 9340 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9341 9342 else if(r_read_fsm.read() == READ_TRT_LOCK) 9343 r_alloc_trt_fsm = ALLOC_TRT_READ; 9344 9345 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9346 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9347 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9348 9349 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9350 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9351 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9352 } 9353 break; 9354 9355 //////////////////////// 9369 } 9370 break; 9371 } 9372 //////////////////////// 9356 9373 case ALLOC_TRT_XRAM_RSP: 9357 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 9374 { 9375 if (((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 9358 9376 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 9359 9377 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and … … 9361 9379 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 9362 9380 { 9363 if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or9381 if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9364 9382 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9365 9383 
r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9366 9384 9367 else if ( r_config_fsm.read() == CONFIG_TRT_LOCK)9385 else if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9368 9386 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9369 9387 … … 9371 9389 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9372 9390 9373 else if (r_read_fsm.read() == READ_TRT_LOCK)9391 else if (r_read_fsm.read() == READ_TRT_LOCK) 9374 9392 r_alloc_trt_fsm = ALLOC_TRT_READ; 9375 9393 9376 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or9394 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9377 9395 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9378 9396 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9379 9397 9380 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or9398 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9381 9399 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9382 9400 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9383 9401 9384 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or9385 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or9386 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or9387 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or9388 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or9389 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) )9402 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9403 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9404 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9405 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9406 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9407 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 9390 9408 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9391 9409 … … 9394 9412 } 9395 9413 break; 9396 9397 9414 } 9415 //////////////////////// 9398 9416 case ALLOC_TRT_IXR_RSP: 9399 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 9417 { 9418 if ((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 9400 9419 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 9401 9420 { 9402 if ( r_config_fsm.read() == CONFIG_TRT_LOCK)9421 if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9403 9422 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9404 9423 … … 9406 9425 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9407 9426 9408 else if (r_read_fsm.read() == READ_TRT_LOCK)9427 else if (r_read_fsm.read() == READ_TRT_LOCK) 9409 9428 r_alloc_trt_fsm = ALLOC_TRT_READ; 9410 9429 9411 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) ||9430 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9412 9431 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9413 9432 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9414 9433 9415 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) ||9434 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9416 9435 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9417 9436 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9418 9437 9419 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or9420 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or9421 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or9422 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or9423 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or9424 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) )9438 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9439 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9440 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9441 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9442 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9443 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 9425 9444 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9426 9445 9427 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) &&9446 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9428 9447 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9429 9448 
r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; … … 9433 9452 } 9434 9453 break; 9435 9436 9454 } 9455 ////////////////////// 9437 9456 case ALLOC_TRT_CONFIG: 9438 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 9457 { 9458 if ((r_config_fsm.read() != CONFIG_TRT_LOCK) and 9439 9459 (r_config_fsm.read() != CONFIG_TRT_SET)) 9440 9460 { … … 9442 9462 r_alloc_trt_fsm = ALLOC_TRT_CLEANUP; 9443 9463 9444 else if (r_read_fsm.read() == READ_TRT_LOCK)9464 else if (r_read_fsm.read() == READ_TRT_LOCK) 9445 9465 r_alloc_trt_fsm = ALLOC_TRT_READ; 9446 9466 9447 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or9467 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9448 9468 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9449 9469 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9450 9470 9451 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or9471 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9452 9472 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9453 9473 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9454 9474 9455 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or9456 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or9457 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or9458 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or9459 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or9460 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) )9475 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9476 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9477 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9478 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9479 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9480 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 9461 9481 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9462 9482 9463 else if ((r_xram_rsp_fsm.read()== XRAM_RSP_DIR_LOCK) and9483 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9464 9484 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9465 9485 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9466 9486 9467 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or9487 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9468 9488 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9469 9489 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9470 9490 } 9471 9491 break; 9472 9473 9492 } 9493 //////////////////////// 9474 9494 case ALLOC_TRT_CLEANUP: 9475 if(r_cleanup_fsm.read() != CLEANUP_IXR_REQ) 9476 { 9477 if(r_read_fsm.read() == READ_TRT_LOCK) 9495 { 9496 if (r_cleanup_fsm.read() != CLEANUP_IXR_REQ) 9497 { 9498 if (r_read_fsm.read() == READ_TRT_LOCK) 9478 9499 r_alloc_trt_fsm = ALLOC_TRT_READ; 9479 9500 9480 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or9501 else if ((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 9481 9502 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 9482 9503 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 9483 9504 9484 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or9505 else if ((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 9485 9506 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 9486 9507 r_alloc_trt_fsm = ALLOC_TRT_CAS; 9487 9508 9488 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or9489 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or9490 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or9491 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or9492 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or9493 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT))9509 else if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 9510 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 9511 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 9512 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 9513 (r_ixr_cmd_fsm.read() == IXR_CMD_CLEANUP_TRT) or 9514 
(r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT)) 9494 9515 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 9495 9516 9496 else if ((r_xram_rsp_fsm.read()== XRAM_RSP_DIR_LOCK) and9517 else if ((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 9497 9518 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 9498 9519 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 9499 9520 9500 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ||9521 else if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 9501 9522 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 9502 9523 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 9503 9524 9504 else if ( r_config_fsm.read() == CONFIG_TRT_LOCK)9525 else if (r_config_fsm.read() == CONFIG_TRT_LOCK) 9505 9526 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 9506 9527 } 9507 9528 break; 9508 9509 9529 } 9510 9530 } // end switch alloc_trt_fsm 9511 9531 … … 9519 9539 ///////////////////////////////////////////////////////////////////////////////////// 9520 9540 9521 switch (r_alloc_heap_fsm.read())9541 switch (r_alloc_heap_fsm.read()) 9522 9542 { 9523 9543 //////////////////// 9524 9544 case ALLOC_HEAP_RESET: 9545 { 9525 9546 // Initializes the heap one ENTRY each cycle. 9526 9547 9527 9548 r_alloc_heap_reset_cpt.write(r_alloc_heap_reset_cpt.read() + 1); 9528 9549 9529 if (r_alloc_heap_reset_cpt.read() == (m_heap_size-1))9550 if (r_alloc_heap_reset_cpt.read() == (m_heap_size - 1)) 9530 9551 { 9531 9552 m_heap.init(); … … 9534 9555 } 9535 9556 break; 9536 9537 9557 } 9558 //////////////////// 9538 9559 case ALLOC_HEAP_READ: 9539 if((r_read_fsm.read() != READ_HEAP_REQ) and 9540 (r_read_fsm.read() != READ_HEAP_LOCK) and 9541 (r_read_fsm.read() != READ_HEAP_ERASE)) 9542 { 9543 if(r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9560 { 9561 if ((r_read_fsm.read() != READ_HEAP_REQ) and 9562 (r_read_fsm.read() != READ_HEAP_LOCK) and 9563 (r_read_fsm.read() != READ_HEAP_ERASE)) 9564 { 9565 if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9544 9566 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9545 9567 9546 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK)9568 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9547 9569 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9548 9570 9549 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ)9571 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9550 9572 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9551 9573 9552 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ)9574 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9553 9575 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9554 9576 9555 else if (r_config_fsm.read() == CONFIG_HEAP_REQ)9577 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 9556 9578 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9557 9579 … … 9560 9582 } 9561 9583 break; 9562 9563 9584 } 9585 ///////////////////// 9564 9586 case ALLOC_HEAP_WRITE: 9565 if((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 9566 (r_write_fsm.read() != WRITE_UPT_REQ) and 9567 (r_write_fsm.read() != WRITE_UPT_NEXT)) 9568 { 9569 if(r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9587 { 9588 if ((r_write_fsm.read() != WRITE_UPT_HEAP_LOCK) and 9589 (r_write_fsm.read() != WRITE_UPT_REQ) and 9590 (r_write_fsm.read() != WRITE_UPT_NEXT)) 9591 { 9592 if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9570 9593 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9571 9594 9572 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ)9595 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9573 9596 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9574 9597 9575 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ)9598 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9576 9599 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 
9577 9600 9578 else if (r_config_fsm.read() == CONFIG_HEAP_REQ)9601 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 9579 9602 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9580 9603 9581 else if (r_read_fsm.read() == READ_HEAP_REQ)9604 else if (r_read_fsm.read() == READ_HEAP_REQ) 9582 9605 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9583 9606 … … 9586 9609 } 9587 9610 break; 9588 9589 9611 } 9612 //////////////////// 9590 9613 case ALLOC_HEAP_CAS: 9591 if((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 9592 (r_cas_fsm.read() != CAS_UPT_REQ) and 9593 (r_cas_fsm.read() != CAS_UPT_NEXT)) 9594 { 9595 if(r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9614 { 9615 if ((r_cas_fsm.read() != CAS_UPT_HEAP_LOCK) and 9616 (r_cas_fsm.read() != CAS_UPT_REQ) and 9617 (r_cas_fsm.read() != CAS_UPT_NEXT)) 9618 { 9619 if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9596 9620 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9597 9621 9598 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ)9622 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9599 9623 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9600 9624 9601 else if (r_config_fsm.read() == CONFIG_HEAP_REQ)9625 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 9602 9626 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9603 9627 9604 else if (r_read_fsm.read() == READ_HEAP_REQ)9628 else if (r_read_fsm.read() == READ_HEAP_REQ) 9605 9629 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9606 9630 9607 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)9631 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9608 9632 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9609 9633 … … 9612 9636 } 9613 9637 break; 9614 9615 9638 } 9639 /////////////////////// 9616 9640 case ALLOC_HEAP_CLEANUP: 9617 if((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 9618 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 9619 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 9620 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 9621 { 9622 if(r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9641 { 9642 if ((r_cleanup_fsm.read() != CLEANUP_HEAP_REQ) and 9643 (r_cleanup_fsm.read() != CLEANUP_HEAP_LOCK) and 9644 (r_cleanup_fsm.read() != CLEANUP_HEAP_SEARCH) and 9645 (r_cleanup_fsm.read() != CLEANUP_HEAP_CLEAN)) 9646 { 9647 if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9623 9648 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9624 9649 9625 else if (r_config_fsm.read() == CONFIG_HEAP_REQ)9650 else if (r_config_fsm.read() == CONFIG_HEAP_REQ) 9626 9651 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9627 9652 9628 else if (r_read_fsm.read() == READ_HEAP_REQ)9653 else if (r_read_fsm.read() == READ_HEAP_REQ) 9629 9654 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9630 9655 9631 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)9656 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9632 9657 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9633 9658 9634 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK)9659 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9635 9660 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9636 9661 … … 9639 9664 } 9640 9665 break; 9641 9642 9666 } 9667 //////////////////////// 9643 9668 case ALLOC_HEAP_XRAM_RSP: 9644 if((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 9645 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 9646 { 9647 if(r_config_fsm.read() == CONFIG_HEAP_REQ) 9669 { 9670 if ((r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_REQ) and 9671 (r_xram_rsp_fsm.read() != XRAM_RSP_HEAP_ERASE)) 9672 { 9673 if (r_config_fsm.read() == CONFIG_HEAP_REQ) 9648 9674 r_alloc_heap_fsm = ALLOC_HEAP_CONFIG; 9649 9675 9650 else if (r_read_fsm.read() == READ_HEAP_REQ)9676 else if (r_read_fsm.read() == READ_HEAP_REQ) 9651 
9677 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9652 9678 9653 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)9679 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9654 9680 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9655 9681 9656 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK)9682 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9657 9683 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9658 9684 9659 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ)9685 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9660 9686 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9661 9687 9662 9688 } 9663 9689 break; 9664 9665 9690 } 9691 /////////////////////// 9666 9692 case ALLOC_HEAP_CONFIG: 9667 if((r_config_fsm.read() != CONFIG_HEAP_REQ) and 9668 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 9669 { 9670 if(r_read_fsm.read() == READ_HEAP_REQ) 9693 { 9694 if ((r_config_fsm.read() != CONFIG_HEAP_REQ) and 9695 (r_config_fsm.read() != CONFIG_HEAP_SCAN)) 9696 { 9697 if (r_read_fsm.read() == READ_HEAP_REQ) 9671 9698 r_alloc_heap_fsm = ALLOC_HEAP_READ; 9672 9699 9673 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK)9700 else if (r_write_fsm.read() == WRITE_UPT_HEAP_LOCK) 9674 9701 r_alloc_heap_fsm = ALLOC_HEAP_WRITE; 9675 9702 9676 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK)9703 else if (r_cas_fsm.read() == CAS_UPT_HEAP_LOCK) 9677 9704 r_alloc_heap_fsm = ALLOC_HEAP_CAS; 9678 9705 9679 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ)9706 else if (r_cleanup_fsm.read() == CLEANUP_HEAP_REQ) 9680 9707 r_alloc_heap_fsm = ALLOC_HEAP_CLEANUP; 9681 9708 9682 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ)9709 else if (r_xram_rsp_fsm.read() == XRAM_RSP_HEAP_REQ) 9683 9710 r_alloc_heap_fsm = ALLOC_HEAP_XRAM_RSP; 9684 9711 … … 9687 9714 } 9688 9715 break; 9689 9716 } 9690 9717 } // end switch alloc_heap_fsm 9691 9718 … … 9694 9721 ///////////////////////////////////////////////////////////////////// 9695 9722 9696 m_cmd_read_addr_fifo.update( 9697 p_vci_tgt.address.read() 9698 m_cmd_read_length_fifo.update( 9699 p_vci_tgt.plen.read() >>2);9700 m_cmd_read_srcid_fifo.update( 9701 p_vci_tgt.srcid.read() 9702 m_cmd_read_trdid_fifo.update( 9703 p_vci_tgt.trdid.read() 9704 m_cmd_read_pktid_fifo.update( 9705 p_vci_tgt.pktid.read() 9723 m_cmd_read_addr_fifo.update(cmd_read_fifo_get, cmd_read_fifo_put, 9724 p_vci_tgt.address.read()); 9725 m_cmd_read_length_fifo.update(cmd_read_fifo_get, cmd_read_fifo_put, 9726 p_vci_tgt.plen.read() >> 2); 9727 m_cmd_read_srcid_fifo.update(cmd_read_fifo_get, cmd_read_fifo_put, 9728 p_vci_tgt.srcid.read()); 9729 m_cmd_read_trdid_fifo.update(cmd_read_fifo_get, cmd_read_fifo_put, 9730 p_vci_tgt.trdid.read()); 9731 m_cmd_read_pktid_fifo.update(cmd_read_fifo_get, cmd_read_fifo_put, 9732 p_vci_tgt.pktid.read()); 9706 9733 9707 9734 ///////////////////////////////////////////////////////////////////// … … 9709 9736 ///////////////////////////////////////////////////////////////////// 9710 9737 9711 m_cmd_write_addr_fifo.update( 9712 (addr_t)p_vci_tgt.address.read() 9713 m_cmd_write_eop_fifo.update( 9714 p_vci_tgt.eop.read() 9715 m_cmd_write_srcid_fifo.update( 9716 p_vci_tgt.srcid.read() 9717 m_cmd_write_trdid_fifo.update( 9718 p_vci_tgt.trdid.read() 9719 m_cmd_write_pktid_fifo.update( 9720 p_vci_tgt.pktid.read() 9721 m_cmd_write_data_fifo.update( 9722 p_vci_tgt.wdata.read() 9723 m_cmd_write_be_fifo.update( 9724 p_vci_tgt.be.read() 9738 m_cmd_write_addr_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9739 (addr_t)p_vci_tgt.address.read()); 9740 m_cmd_write_eop_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9741 
p_vci_tgt.eop.read()); 9742 m_cmd_write_srcid_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9743 p_vci_tgt.srcid.read()); 9744 m_cmd_write_trdid_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9745 p_vci_tgt.trdid.read()); 9746 m_cmd_write_pktid_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9747 p_vci_tgt.pktid.read()); 9748 m_cmd_write_data_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9749 p_vci_tgt.wdata.read()); 9750 m_cmd_write_be_fifo.update(cmd_write_fifo_get, cmd_write_fifo_put, 9751 p_vci_tgt.be.read()); 9725 9752 9726 9753 //////////////////////////////////////////////////////////////////////////////////// … … 9728 9755 //////////////////////////////////////////////////////////////////////////////////// 9729 9756 9730 m_cmd_cas_addr_fifo.update( 9731 (addr_t)p_vci_tgt.address.read() 9732 m_cmd_cas_eop_fifo.update( 9733 p_vci_tgt.eop.read() 9734 m_cmd_cas_srcid_fifo.update( 9735 p_vci_tgt.srcid.read() 9736 m_cmd_cas_trdid_fifo.update( 9737 p_vci_tgt.trdid.read() 9738 m_cmd_cas_pktid_fifo.update( 9739 p_vci_tgt.pktid.read() 9740 m_cmd_cas_wdata_fifo.update( 9741 p_vci_tgt.wdata.read() 9757 m_cmd_cas_addr_fifo.update(cmd_cas_fifo_get, cmd_cas_fifo_put, 9758 (addr_t)p_vci_tgt.address.read()); 9759 m_cmd_cas_eop_fifo.update(cmd_cas_fifo_get, cmd_cas_fifo_put, 9760 p_vci_tgt.eop.read()); 9761 m_cmd_cas_srcid_fifo.update(cmd_cas_fifo_get, cmd_cas_fifo_put, 9762 p_vci_tgt.srcid.read()); 9763 m_cmd_cas_trdid_fifo.update(cmd_cas_fifo_get, cmd_cas_fifo_put, 9764 p_vci_tgt.trdid.read()); 9765 m_cmd_cas_pktid_fifo.update(cmd_cas_fifo_get, cmd_cas_fifo_put, 9766 p_vci_tgt.pktid.read()); 9767 m_cmd_cas_wdata_fifo.update(cmd_cas_fifo_get, cmd_cas_fifo_put, 9768 p_vci_tgt.wdata.read()); 9742 9769 9743 9770 //////////////////////////////////////////////////////////////////////////////////// … … 9745 9772 //////////////////////////////////////////////////////////////////////////////////// 9746 9773 9747 if (cc_receive_to_cleanup_fifo_put)9774 if (cc_receive_to_cleanup_fifo_put) 9748 9775 { 9749 if (cc_receive_to_cleanup_fifo_get)9750 { 9751 m_cc_receive_to_cleanup_fifo.put_and_get( ((uint64_t)(p_dspin_p2m.eop.read()&0x1) << 32) | p_dspin_p2m.data.read());9776 if (cc_receive_to_cleanup_fifo_get) 9777 { 9778 m_cc_receive_to_cleanup_fifo.put_and_get(((uint64_t) (p_dspin_p2m.eop.read() & 0x1) << 32) | p_dspin_p2m.data.read()); 9752 9779 } 9753 9780 else 9754 9781 { 9755 m_cc_receive_to_cleanup_fifo.simple_put( ((uint64_t)(p_dspin_p2m.eop.read()&0x1) << 32) | p_dspin_p2m.data.read());9782 m_cc_receive_to_cleanup_fifo.simple_put(((uint64_t) (p_dspin_p2m.eop.read() & 0x1) << 32) | p_dspin_p2m.data.read()); 9756 9783 } 9757 9784 } 9758 9785 else 9759 9786 { 9760 if (cc_receive_to_cleanup_fifo_get)9787 if (cc_receive_to_cleanup_fifo_get) 9761 9788 { 9762 9789 m_cc_receive_to_cleanup_fifo.simple_get(); … … 9768 9795 //////////////////////////////////////////////////////////////////////////////////// 9769 9796 9770 m_cc_receive_to_multi_ack_fifo.update( 9797 m_cc_receive_to_multi_ack_fifo.update(cc_receive_to_multi_ack_fifo_get, 9771 9798 cc_receive_to_multi_ack_fifo_put, 9772 p_dspin_p2m.data.read() 9799 p_dspin_p2m.data.read()); 9773 9800 9774 9801 //////////////////////////////////////////////////////////////////////////////////// … … 9776 9803 //////////////////////////////////////////////////////////////////////////////////// 9777 9804 9778 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 9779 write_to_cc_send_fifo_inst ); 9780 
m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 9781 write_to_cc_send_fifo_srcid ); 9805 m_write_to_cc_send_inst_fifo.update(write_to_cc_send_fifo_get, 9806 write_to_cc_send_fifo_put, 9807 write_to_cc_send_fifo_inst); 9808 m_write_to_cc_send_srcid_fifo.update(write_to_cc_send_fifo_get, 9809 write_to_cc_send_fifo_put, 9810 write_to_cc_send_fifo_srcid); 9782 9811 9783 9812 //////////////////////////////////////////////////////////////////////////////////// … … 9785 9814 //////////////////////////////////////////////////////////////////////////////////// 9786 9815 9787 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 9788 config_to_cc_send_fifo_inst ); 9789 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 9790 config_to_cc_send_fifo_srcid ); 9816 m_config_to_cc_send_inst_fifo.update(config_to_cc_send_fifo_get, 9817 config_to_cc_send_fifo_put, 9818 config_to_cc_send_fifo_inst); 9819 m_config_to_cc_send_srcid_fifo.update(config_to_cc_send_fifo_get, 9820 config_to_cc_send_fifo_put, 9821 config_to_cc_send_fifo_srcid); 9791 9822 9792 9823 //////////////////////////////////////////////////////////////////////////////////// … … 9794 9825 //////////////////////////////////////////////////////////////////////////////////// 9795 9826 9796 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 9797 xram_rsp_to_cc_send_fifo_inst ); 9798 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 9799 xram_rsp_to_cc_send_fifo_srcid ); 9827 m_xram_rsp_to_cc_send_inst_fifo.update(xram_rsp_to_cc_send_fifo_get, 9828 xram_rsp_to_cc_send_fifo_put, 9829 xram_rsp_to_cc_send_fifo_inst); 9830 m_xram_rsp_to_cc_send_srcid_fifo.update(xram_rsp_to_cc_send_fifo_get, 9831 xram_rsp_to_cc_send_fifo_put, 9832 xram_rsp_to_cc_send_fifo_srcid); 9800 9833 9801 9834 //////////////////////////////////////////////////////////////////////////////////// … … 9803 9836 //////////////////////////////////////////////////////////////////////////////////// 9804 9837 9805 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 9806 cas_to_cc_send_fifo_inst ); 9807 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 9808 cas_to_cc_send_fifo_srcid ); 9838 m_cas_to_cc_send_inst_fifo.update(cas_to_cc_send_fifo_get, 9839 cas_to_cc_send_fifo_put, 9840 cas_to_cc_send_fifo_inst); 9841 m_cas_to_cc_send_srcid_fifo.update(cas_to_cc_send_fifo_get, 9842 cas_to_cc_send_fifo_put, 9843 cas_to_cc_send_fifo_srcid); 9809 9844 m_cpt_cycles++; 9810 9845 … … 9813 9848 // The three sources of (increment / decrement) are CONFIG / CLEANUP / IXR_RSP FSMs 9814 9849 //////////////////////////////////////////////////////////////////////////////////// 9815 if ( 9816 (config_rsp_lines_cleanup_decr or config_rsp_lines_ixr_rsp_decr) 9850 if (config_rsp_lines_incr and not 9851 (config_rsp_lines_cleanup_decr or config_rsp_lines_ixr_rsp_decr)) 9817 9852 { 9818 9853 r_config_rsp_lines = r_config_rsp_lines.read() + 1; 9819 9854 } 9820 if ( 9821 (config_rsp_lines_cleanup_decr or config_rsp_lines_ixr_rsp_decr) 9855 if (not config_rsp_lines_incr and 9856 (config_rsp_lines_cleanup_decr or config_rsp_lines_ixr_rsp_decr)) 9822 9857 { 9823 9858 r_config_rsp_lines = r_config_rsp_lines.read() - 1; … … 9839 9874 { 9840 9875 #if MONITOR_MEMCACHE_FSM == 1 9841 p_read_fsm.write (r_read_fsm.read());9842 
p_write_fsm.write (r_write_fsm.read());9843 p_xram_rsp_fsm.write (r_xram_rsp_fsm.read());9844 p_cas_fsm.write (r_cas_fsm.read());9845 p_cleanup_fsm.write (r_cleanup_fsm.read());9846 p_config_fsm.write (r_config_fsm.read());9847 p_alloc_heap_fsm.write (r_alloc_heap_fsm.read());9848 p_alloc_dir_fsm.write (r_alloc_dir_fsm.read());9849 p_alloc_trt_fsm.write (r_alloc_trt_fsm.read());9850 p_alloc_upt_fsm.write (r_alloc_upt_fsm.read());9851 p_alloc_ivt_fsm.write (r_alloc_ivt_fsm.read());9852 p_tgt_cmd_fsm.write (r_tgt_cmd_fsm.read());9853 p_tgt_rsp_fsm.write (r_tgt_rsp_fsm.read());9854 p_ixr_cmd_fsm.write (r_ixr_cmd_fsm.read());9855 p_ixr_rsp_fsm.write (r_ixr_rsp_fsm.read());9856 p_cc_send_fsm.write (r_cc_send_fsm.read());9857 p_cc_receive_fsm.write (r_cc_receive_fsm.read());9858 p_multi_ack_fsm.write (r_multi_ack_fsm.read());9876 p_read_fsm.write (r_read_fsm.read()); 9877 p_write_fsm.write (r_write_fsm.read()); 9878 p_xram_rsp_fsm.write (r_xram_rsp_fsm.read()); 9879 p_cas_fsm.write (r_cas_fsm.read()); 9880 p_cleanup_fsm.write (r_cleanup_fsm.read()); 9881 p_config_fsm.write (r_config_fsm.read()); 9882 p_alloc_heap_fsm.write(r_alloc_heap_fsm.read()); 9883 p_alloc_dir_fsm.write (r_alloc_dir_fsm.read()); 9884 p_alloc_trt_fsm.write (r_alloc_trt_fsm.read()); 9885 p_alloc_upt_fsm.write (r_alloc_upt_fsm.read()); 9886 p_alloc_ivt_fsm.write (r_alloc_ivt_fsm.read()); 9887 p_tgt_cmd_fsm.write (r_tgt_cmd_fsm.read()); 9888 p_tgt_rsp_fsm.write (r_tgt_rsp_fsm.read()); 9889 p_ixr_cmd_fsm.write (r_ixr_cmd_fsm.read()); 9890 p_ixr_rsp_fsm.write (r_ixr_rsp_fsm.read()); 9891 p_cc_send_fsm.write (r_cc_send_fsm.read()); 9892 p_cc_receive_fsm.write(r_cc_receive_fsm.read()); 9893 p_multi_ack_fsm.write (r_multi_ack_fsm.read()); 9859 9894 #endif 9860 9895 … … 9862 9897 // Command signals on the p_vci_ixr port 9863 9898 //////////////////////////////////////////////////////////// 9899 9864 9900 // DATA width is 8 bytes 9865 9901 // The following values are not transmitted to XRAM … … 9875 9911 p_vci_ixr.srcid = m_srcid_x; 9876 9912 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 9877 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() <<2);9913 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2); 9878 9914 p_vci_ixr.be = 0xFF; 9879 9915 p_vci_ixr.pktid = 0; … … 9884 9920 p_vci_ixr.cfixed = false; 9885 9921 9886 if ( 9887 9888 9889 9890 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND))9922 if ((r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 9923 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 9924 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 9925 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 9926 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND)) 9891 9927 { 9892 p_vci_ixr.cmdval 9893 9894 if ( r_ixr_cmd_get.read()) // GET9895 { 9896 p_vci_ixr.cmd 9897 p_vci_ixr.wdata 9898 p_vci_ixr.eop 9928 p_vci_ixr.cmdval = true; 9929 9930 if (r_ixr_cmd_get.read()) // GET 9931 { 9932 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 9933 p_vci_ixr.wdata = 0; 9934 p_vci_ixr.eop = true; 9899 9935 } 9900 9936 else // PUT 9901 9937 { 9902 size_t word 9903 p_vci_ixr.cmd 9904 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read()))|9905 ((wide_data_t) (r_ixr_cmd_wdata[word+1].read()) << 32);9906 p_vci_ixr.eop = (word == (m_words-2));9938 size_t word = r_ixr_cmd_word.read(); 9939 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 9940 p_vci_ixr.wdata = ((wide_data_t) (r_ixr_cmd_wdata[word].read())) | 9941 ((wide_data_t) (r_ixr_cmd_wdata[word + 1].read()) << 32); 9942 p_vci_ixr.eop = (word == (m_words - 2)); 
9907 9943 } 9908 9944 } … … 9911 9947 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 9912 9948 p_vci_ixr.cmdval = true; 9913 p_vci_ixr.address = (addr_t) r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2);9914 p_vci_ixr.wdata = ((wide_data_t) (r_ixr_cmd_wdata[r_ixr_cmd_word.read()].read()) |9915 ((wide_data_t) (r_ixr_cmd_wdata[r_ixr_cmd_word.read() + 1].read()) << 32));9949 p_vci_ixr.address = (addr_t) r_ixr_cmd_address.read() + (r_ixr_cmd_word.read() << 2); 9950 p_vci_ixr.wdata = ((wide_data_t) (r_ixr_cmd_wdata[r_ixr_cmd_word.read()].read()) | 9951 ((wide_data_t) (r_ixr_cmd_wdata[r_ixr_cmd_word.read() + 1].read()) << 32)); 9916 9952 p_vci_ixr.trdid = r_cleanup_to_ixr_cmd_index.read(); 9917 9953 p_vci_ixr.eop = (r_ixr_cmd_word == (m_words - 2)); 9918 9954 } 9919 9920 9955 else 9921 9956 { 9922 p_vci_ixr.cmdval 9957 p_vci_ixr.cmdval = false; 9923 9958 } 9924 9959 … … 9927 9962 //////////////////////////////////////////////////// 9928 9963 9929 if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or9930 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE))9964 if ((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 9965 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE)) 9931 9966 { 9932 9967 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 9933 }9934 else if (r_ixr_rsp_fsm.read() == IXR_RSP_ACK)9935 {9936 p_vci_ixr.rspack = true;9937 9968 } 9938 9969 else // r_ixr_rsp_fsm == IXR_RSP_IDLE … … 9945 9976 //////////////////////////////////////////////////// 9946 9977 9947 switch ((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read())9978 switch ((tgt_cmd_fsm_state_e) r_tgt_cmd_fsm.read()) 9948 9979 { 9949 9980 case TGT_CMD_IDLE: … … 9952 9983 9953 9984 case TGT_CMD_CONFIG: 9985 { 9986 addr_t addr_lsb = p_vci_tgt.address.read() & 9987 m_config_addr_mask; 9988 9989 addr_t cell = (addr_lsb / vci_param_int::B); 9990 9991 size_t regr = cell & m_config_regr_idx_mask; 9992 9993 size_t func = (cell >> m_config_regr_width) & 9994 m_config_func_idx_mask; 9995 9996 switch (func) 9997 { 9998 case MEMC_CONFIG: 9999 if ((p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) 10000 and (regr == MEMC_CMD_TYPE)) 10001 { 10002 p_vci_tgt.cmdack = true; 10003 } 10004 else 10005 { 10006 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 10007 } 10008 break; 10009 10010 default: 10011 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); 10012 break; 10013 } 10014 10015 break; 10016 } 9954 10017 case TGT_CMD_ERROR: 9955 10018 p_vci_tgt.cmdack = not r_tgt_cmd_to_tgt_rsp_req.read(); … … 9973 10036 //////////////////////////////////////////////////// 9974 10037 9975 switch (r_tgt_rsp_fsm.read())10038 switch (r_tgt_rsp_fsm.read()) 9976 10039 { 9977 10040 case TGT_RSP_CONFIG_IDLE: … … 9983 10046 case TGT_RSP_MULTI_ACK_IDLE: 9984 10047 case TGT_RSP_CLEANUP_IDLE: 9985 9986 p_vci_tgt.rspval= false;9987 p_vci_tgt.rsrcid= 0;9988 p_vci_tgt.rdata= 0;9989 p_vci_tgt.rpktid= 0;9990 p_vci_tgt.rtrdid= 0;9991 p_vci_tgt.rerror= 0;9992 p_vci_tgt.reop= false;9993 9994 10048 { 10049 p_vci_tgt.rspval = false; 10050 p_vci_tgt.rsrcid = 0; 10051 p_vci_tgt.rdata = 0; 10052 p_vci_tgt.rpktid = 0; 10053 p_vci_tgt.rtrdid = 0; 10054 p_vci_tgt.rerror = 0; 10055 p_vci_tgt.reop = false; 10056 break; 10057 } 9995 10058 case TGT_RSP_CONFIG: 9996 { 9997 p_vci_tgt.rspval = true; 9998 p_vci_tgt.rdata = 0; 9999 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 10000 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 10001 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 10002 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 10003 p_vci_tgt.reop = true; 10004 10005 
break; 10006 } 10007 10059 { 10060 p_vci_tgt.rspval = true; 10061 p_vci_tgt.rdata = 0; 10062 p_vci_tgt.rsrcid = r_config_to_tgt_rsp_srcid.read(); 10063 p_vci_tgt.rtrdid = r_config_to_tgt_rsp_trdid.read(); 10064 p_vci_tgt.rpktid = r_config_to_tgt_rsp_pktid.read(); 10065 p_vci_tgt.rerror = r_config_to_tgt_rsp_error.read(); 10066 p_vci_tgt.reop = true; 10067 break; 10068 } 10008 10069 case TGT_RSP_TGT_CMD: 10009 { 10010 p_vci_tgt.rspval = true; 10011 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 10012 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 10013 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 10014 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 10015 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 10016 p_vci_tgt.reop = true; 10017 10018 break; 10019 } 10020 10070 { 10071 p_vci_tgt.rspval = true; 10072 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 10073 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 10074 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); 10075 p_vci_tgt.rpktid = r_tgt_cmd_to_tgt_rsp_pktid.read(); 10076 p_vci_tgt.rerror = r_tgt_cmd_to_tgt_rsp_error.read(); 10077 p_vci_tgt.reop = true; 10078 break; 10079 } 10021 10080 case TGT_RSP_READ: 10022 10023 10024 bool is_last_word= (r_tgt_rsp_cpt.read() == last_word_idx);10025 bool is_ll= ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL);10026 10027 10028 10029 if ( is_ll and not r_tgt_rsp_key_sent.read())10030 10031 10032 10033 10034 10035 10036 10037 10038 10039 10040 p_vci_tgt.rsrcid= r_read_to_tgt_rsp_srcid.read();10041 p_vci_tgt.rtrdid= r_read_to_tgt_rsp_trdid.read();10042 p_vci_tgt.rpktid= r_read_to_tgt_rsp_pktid.read();10043 p_vci_tgt.rerror= 0;10044 p_vci_tgt.reop= (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll);10045 10046 10081 { 10082 uint32_t last_word_idx = r_read_to_tgt_rsp_word.read() + r_read_to_tgt_rsp_length - 1; 10083 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 10084 bool is_ll = ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 10085 10086 p_vci_tgt.rspval = true; 10087 10088 if (is_ll and not r_tgt_rsp_key_sent.read()) 10089 { 10090 // LL response first flit 10091 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 10092 } 10093 else 10094 { 10095 // LL response second flit or READ response 10096 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 10097 } 10098 10099 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 10100 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); 10101 p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); 10102 p_vci_tgt.rerror = 0; 10103 p_vci_tgt.reop = (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 10104 break; 10105 } 10047 10106 10048 10107 case TGT_RSP_WRITE: 10049 p_vci_tgt.rspval 10050 if (((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read())10051 p_vci_tgt.rdata 10108 p_vci_tgt.rspval = true; 10109 if (((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) and r_write_to_tgt_rsp_sc_fail.read()) 10110 p_vci_tgt.rdata = 1; 10052 10111 else 10053 p_vci_tgt.rdata 10054 p_vci_tgt.rsrcid 10055 p_vci_tgt.rtrdid 10056 p_vci_tgt.rpktid 10057 p_vci_tgt.rerror 10058 p_vci_tgt.reop 10112 p_vci_tgt.rdata = 0; 10113 p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 10114 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 10115 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 10116 p_vci_tgt.rerror = 0; 10117 p_vci_tgt.reop = true; 10059 10118 break; 10060 10119 10061 10120 case TGT_RSP_CLEANUP: 10062 10063 10064 
bool is_last_word= (r_tgt_rsp_cpt.read() == last_word_idx);10065 bool is_ll= ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL);10066 10067 p_vci_tgt.rspval= true;10068 10069 10070 p_vci_tgt.rdata= r_cleanup_to_tgt_rsp_ll_key.read();10071 10072 10073 10074 p_vci_tgt.rdata= r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read();10075 10076 else //if the CLEANUP fsm sends a SC_RSP, then it is a success (and it caused an inval)10077 10078 p_vci_tgt.rdata= 0;10079 10080 p_vci_tgt.rsrcid= r_cleanup_to_tgt_rsp_srcid.read();10081 p_vci_tgt.rtrdid= r_cleanup_to_tgt_rsp_trdid.read();10082 p_vci_tgt.rpktid= r_cleanup_to_tgt_rsp_pktid.read();10083 p_vci_tgt.rerror= 0; // Can be a CAS rsp10084 p_vci_tgt.reop= r_cleanup_to_tgt_rsp_type.read() or (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll);10085 10086 10121 { 10122 uint32_t last_word_idx = r_cleanup_to_tgt_rsp_first_word.read() + r_cleanup_to_tgt_rsp_length - 1; 10123 bool is_last_word = (r_tgt_rsp_cpt.read() == last_word_idx); 10124 bool is_ll = ((r_cleanup_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL); 10125 10126 p_vci_tgt.rspval = true; 10127 if (is_ll and not r_tgt_rsp_key_sent.read()) 10128 { 10129 p_vci_tgt.rdata = r_cleanup_to_tgt_rsp_ll_key.read(); 10130 } 10131 else if (!r_cleanup_to_tgt_rsp_type.read()) 10132 { 10133 p_vci_tgt.rdata = r_cleanup_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 10134 } 10135 else // if the CLEANUP fsm sends a SC_RSP, then it is a success (and it caused an inval) 10136 { 10137 p_vci_tgt.rdata = 0; 10138 } 10139 p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); 10140 p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); 10141 p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); 10142 p_vci_tgt.rerror = 0; // Can be a CAS rsp 10143 p_vci_tgt.reop = r_cleanup_to_tgt_rsp_type.read() or (is_last_word and not is_ll) or (r_tgt_rsp_key_sent.read() and is_ll); 10144 break; 10145 } 10087 10146 10088 10147 case TGT_RSP_CAS: 10089 p_vci_tgt.rspval 10090 p_vci_tgt.rdata 10091 p_vci_tgt.rsrcid 10092 p_vci_tgt.rtrdid 10093 p_vci_tgt.rpktid 10094 p_vci_tgt.rerror 10095 p_vci_tgt.reop 10148 p_vci_tgt.rspval = true; 10149 p_vci_tgt.rdata = r_cas_to_tgt_rsp_data.read(); 10150 p_vci_tgt.rsrcid = r_cas_to_tgt_rsp_srcid.read(); 10151 p_vci_tgt.rtrdid = r_cas_to_tgt_rsp_trdid.read(); 10152 p_vci_tgt.rpktid = r_cas_to_tgt_rsp_pktid.read(); 10153 p_vci_tgt.rerror = 0; 10154 p_vci_tgt.reop = true; 10096 10155 break; 10097 10156 … … 10103 10162 bool is_error = r_xram_rsp_to_tgt_rsp_rerror.read(); 10104 10163 10105 p_vci_tgt.rspval 10106 10107 if ( is_ll and not r_tgt_rsp_key_sent.read())10164 p_vci_tgt.rspval = true; 10165 10166 if (is_ll and not r_tgt_rsp_key_sent.read()) 10108 10167 { 10109 10168 // LL response first flit … … 10116 10175 } 10117 10176 10118 p_vci_tgt.rsrcid 10119 p_vci_tgt.rtrdid 10120 p_vci_tgt.rpktid 10121 p_vci_tgt.rerror 10122 p_vci_tgt.reop 10177 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 10178 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); 10179 p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); 10180 p_vci_tgt.rerror = is_error; 10181 p_vci_tgt.reop = (((is_last_word or is_error) and not is_ll) or 10123 10182 (r_tgt_rsp_key_sent.read() and is_ll)); 10124 10125 10183 break; 10126 10184 } 10127 10185 10128 10186 case TGT_RSP_MULTI_ACK: 10129 p_vci_tgt.rspval 10130 p_vci_tgt.rdata 10131 p_vci_tgt.rsrcid 10132 p_vci_tgt.rtrdid 10133 p_vci_tgt.rpktid 10134 p_vci_tgt.rerror 10135 p_vci_tgt.reop 10187 p_vci_tgt.rspval = true; 10188 p_vci_tgt.rdata = 0; // Can be a CAS 
or SC rsp 10189 p_vci_tgt.rsrcid = r_multi_ack_to_tgt_rsp_srcid.read(); 10190 p_vci_tgt.rtrdid = r_multi_ack_to_tgt_rsp_trdid.read(); 10191 p_vci_tgt.rpktid = r_multi_ack_to_tgt_rsp_pktid.read(); 10192 p_vci_tgt.rerror = 0; 10193 p_vci_tgt.reop = true; 10136 10194 break; 10137 10195 } // end switch r_tgt_rsp_fsm … … 10155 10213 p_dspin_m2p.data = 0; 10156 10214 10157 switch (r_cc_send_fsm.read())10215 switch (r_cc_send_fsm.read()) 10158 10216 { 10159 10217 /////////////////////////// … … 10162 10220 case CC_SEND_WRITE_IDLE: 10163 10221 case CC_SEND_CAS_IDLE: 10164 10165 10166 10167 10222 { 10223 break; 10224 } 10225 //////////////////////////////// 10168 10226 case CC_SEND_CONFIG_INVAL_HEADER: 10169 { 10170 uint8_t multi_inval_type; 10171 if(m_config_to_cc_send_inst_fifo.read()) 10172 { 10173 multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_INST; 10174 } 10175 else 10176 { 10177 multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_DATA; 10178 } 10179 10180 uint64_t flit = 0; 10181 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 10182 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10183 10184 DspinRwtParam::dspin_set( flit, 10185 dest, 10186 DspinRwtParam::MULTI_INVAL_DEST); 10187 10188 // MODIFIED FOR CONFIG INVAL (solution 1 bit in flit multi_inval) 10189 DspinRwtParam::dspin_set( flit, 10190 1, 10191 DspinRwtParam::MULTI_INVAL_IS_CONFIG); 10192 10193 DspinRwtParam::dspin_set( flit, 10194 r_config_to_cc_send_trdid.read(), 10195 DspinRwtParam::MULTI_INVAL_UPDT_INDEX); 10196 10197 DspinRwtParam::dspin_set( flit, 10198 multi_inval_type, 10199 DspinRwtParam::M2P_TYPE); 10200 p_dspin_m2p.write = true; 10201 p_dspin_m2p.data = flit; 10202 break; 10203 } 10204 //////////////////////////////// 10227 { 10228 uint8_t multi_inval_type; 10229 if (m_config_to_cc_send_inst_fifo.read()) 10230 { 10231 multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_INST; 10232 } 10233 else 10234 { 10235 multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_DATA; 10236 } 10237 10238 uint64_t flit = 0; 10239 uint64_t dest = m_config_to_cc_send_srcid_fifo.read() << 10240 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10241 10242 DspinRwtParam::dspin_set(flit, 10243 dest, 10244 DspinRwtParam::MULTI_INVAL_DEST); 10245 10246 DspinRwtParam::dspin_set(flit, 10247 1, 10248 DspinRwtParam::MULTI_INVAL_IS_CONFIG); 10249 10250 DspinRwtParam::dspin_set(flit, 10251 r_config_to_cc_send_trdid.read(), 10252 DspinRwtParam::MULTI_INVAL_UPDT_INDEX); 10253 10254 DspinRwtParam::dspin_set(flit, 10255 multi_inval_type, 10256 DspinRwtParam::M2P_TYPE); 10257 p_dspin_m2p.write = true; 10258 p_dspin_m2p.data = flit; 10259 break; 10260 } 10261 //////////////////////////////// 10205 10262 case CC_SEND_CONFIG_INVAL_NLINE: 10206 10207 10208 DspinRwtParam::dspin_set(flit,10209 r_config_to_cc_send_nline.read(),10210 DspinRwtParam::MULTI_INVAL_NLINE);10211 10212 10213 10214 10215 10216 10263 { 10264 uint64_t flit = 0; 10265 DspinRwtParam::dspin_set(flit, 10266 r_config_to_cc_send_nline.read(), 10267 DspinRwtParam::MULTI_INVAL_NLINE); 10268 p_dspin_m2p.eop = true; 10269 p_dspin_m2p.write = true; 10270 p_dspin_m2p.data = flit; 10271 break; 10272 } 10273 /////////////////////////////////// 10217 10274 case CC_SEND_XRAM_RSP_INVAL_HEADER: 10218 10219 if(not m_xram_rsp_to_cc_send_inst_fifo.rok()) break;10220 10221 10222 if(m_xram_rsp_to_cc_send_inst_fifo.read())10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234 10235 DspinRwtParam::dspin_set(flit,10236 dest,10237 DspinRwtParam::MULTI_INVAL_DEST);10238 10239 
DspinRwtParam::dspin_set(flit,10240 r_xram_rsp_to_cc_send_trdid.read(),10241 DspinRwtParam::MULTI_INVAL_UPDT_INDEX);10242 10243 DspinRwtParam::dspin_set(flit,10244 multi_inval_type,10245 DspinRwtParam::M2P_TYPE);10246 10247 10248 10249 10250 10251 10275 { 10276 if (not m_xram_rsp_to_cc_send_inst_fifo.rok()) break; 10277 10278 uint8_t multi_inval_type; 10279 if (m_xram_rsp_to_cc_send_inst_fifo.read()) 10280 { 10281 multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_INST; 10282 } 10283 else 10284 { 10285 multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_DATA; 10286 } 10287 10288 uint64_t flit = 0; 10289 uint64_t dest = m_xram_rsp_to_cc_send_srcid_fifo.read() << 10290 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10291 10292 DspinRwtParam::dspin_set(flit, 10293 dest, 10294 DspinRwtParam::MULTI_INVAL_DEST); 10295 10296 DspinRwtParam::dspin_set(flit, 10297 r_xram_rsp_to_cc_send_trdid.read(), 10298 DspinRwtParam::MULTI_INVAL_UPDT_INDEX); 10299 10300 DspinRwtParam::dspin_set(flit, 10301 multi_inval_type, 10302 DspinRwtParam::M2P_TYPE); 10303 p_dspin_m2p.write = true; 10304 p_dspin_m2p.data = flit; 10305 break; 10306 } 10307 10308 ////////////////////////////////// 10252 10309 case CC_SEND_XRAM_RSP_INVAL_NLINE: 10253 10254 10255 10256 DspinRwtParam::dspin_set(flit,10257 r_xram_rsp_to_cc_send_nline.read(),10258 DspinRwtParam::MULTI_INVAL_NLINE);10259 10260 10261 10262 10263 10264 10265 10310 { 10311 uint64_t flit = 0; 10312 10313 DspinRwtParam::dspin_set(flit, 10314 r_xram_rsp_to_cc_send_nline.read(), 10315 DspinRwtParam::MULTI_INVAL_NLINE); 10316 p_dspin_m2p.eop = true; 10317 p_dspin_m2p.write = true; 10318 p_dspin_m2p.data = flit; 10319 break; 10320 } 10321 10322 ///////////////////////////////////// 10266 10323 case CC_SEND_CONFIG_BRDCAST_HEADER: 10267 10268 10269 10270 DspinRwtParam::dspin_set(flit,10271 m_broadcast_boundaries,10272 DspinRwtParam::BROADCAST_BOX);10273 10274 DspinRwtParam::dspin_set(flit,10275 1,10276 DspinRwtParam::MULTI_INVAL_IS_CONFIG);10277 10278 DspinRwtParam::dspin_set(flit,10279 1ULL,10280 DspinRwtParam::M2P_BC);10281 10282 10283 10284 10285 10324 { 10325 uint64_t flit = 0; 10326 10327 DspinRwtParam::dspin_set(flit, 10328 m_broadcast_boundaries, 10329 DspinRwtParam::BROADCAST_BOX); 10330 10331 DspinRwtParam::dspin_set(flit, 10332 1, 10333 DspinRwtParam::MULTI_INVAL_IS_CONFIG); 10334 10335 DspinRwtParam::dspin_set(flit, 10336 1ULL, 10337 DspinRwtParam::M2P_BC); 10338 p_dspin_m2p.write = true; 10339 p_dspin_m2p.data = flit; 10340 break; 10341 } 10342 ///////////////////////////////////// 10286 10343 case CC_SEND_XRAM_RSP_BRDCAST_HEADER: 10287 10344 case CC_SEND_WRITE_BRDCAST_HEADER: 10288 10345 case CC_SEND_CAS_BRDCAST_HEADER: 10289 10290 10291 10292 DspinRwtParam::dspin_set(flit,10293 m_broadcast_boundaries,10294 DspinRwtParam::BROADCAST_BOX);10295 10296 DspinRwtParam::dspin_set(flit,10297 1ULL,10298 DspinRwtParam::M2P_BC);10299 10300 10301 10302 10303 10346 { 10347 uint64_t flit = 0; 10348 10349 DspinRwtParam::dspin_set(flit, 10350 m_broadcast_boundaries, 10351 DspinRwtParam::BROADCAST_BOX); 10352 10353 DspinRwtParam::dspin_set(flit, 10354 1ULL, 10355 DspinRwtParam::M2P_BC); 10356 p_dspin_m2p.write = true; 10357 p_dspin_m2p.data = flit; 10358 break; 10359 } 10360 //////////////////////////////////// 10304 10361 case CC_SEND_XRAM_RSP_BRDCAST_NLINE: 10305 10306 10307 DspinRwtParam::dspin_set(flit,10308 r_xram_rsp_to_cc_send_nline.read(),10309 DspinRwtParam::BROADCAST_NLINE);10310 10311 10312 10313 10314 10315 10362 { 10363 uint64_t flit = 0; 10364 
DspinRwtParam::dspin_set(flit, 10365 r_xram_rsp_to_cc_send_nline.read(), 10366 DspinRwtParam::BROADCAST_NLINE); 10367 p_dspin_m2p.write = true; 10368 p_dspin_m2p.eop = true; 10369 p_dspin_m2p.data = flit; 10370 break; 10371 } 10372 ////////////////////////////////// 10316 10373 case CC_SEND_CONFIG_BRDCAST_NLINE: 10317 { 10318 uint64_t flit = 0; 10319 DspinRwtParam::dspin_set( flit, 10320 r_config_to_cc_send_nline.read(), 10321 DspinRwtParam::BROADCAST_NLINE); 10322 p_dspin_m2p.write = true; 10323 p_dspin_m2p.eop = true; 10324 p_dspin_m2p.data = flit; 10325 break; 10326 } 10327 ///////////////////////////////// 10328 10374 { 10375 uint64_t flit = 0; 10376 DspinRwtParam::dspin_set(flit, 10377 r_config_to_cc_send_nline.read(), 10378 DspinRwtParam::BROADCAST_NLINE); 10379 p_dspin_m2p.write = true; 10380 p_dspin_m2p.eop = true; 10381 p_dspin_m2p.data = flit; 10382 break; 10383 } 10384 ///////////////////////////////// 10329 10385 case CC_SEND_READ_NCC_INVAL_HEADER: 10330 { 10331 uint64_t flit = 0; 10332 10333 //uint8_t multi_inval_type; 10334 //if (r_read_to_cc_send_inst.read()) 10335 //{ 10336 // multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_INST; 10337 //} 10338 //else 10339 //{ 10340 // multi_inval_type = DspinRwtParam::TYPE_MULTI_INVAL_DATA; 10341 //} 10342 10343 DspinRwtParam::dspin_set( 10344 flit, 10345 r_read_to_cc_send_dest.read(), 10346 DspinRwtParam::MULTI_INVAL_DEST); 10347 10348 DspinRwtParam::dspin_set( 10349 flit, 10350 DspinRwtParam::TYPE_MULTI_INVAL_DATA, 10351 DspinRwtParam::M2P_TYPE); 10352 10353 p_dspin_m2p.write = true; 10354 p_dspin_m2p.data = flit; 10355 10356 break; 10357 10358 } 10359 10360 10386 { 10387 uint64_t flit = 0; 10388 10389 DspinRwtParam::dspin_set(flit, 10390 r_read_to_cc_send_dest.read(), 10391 DspinRwtParam::MULTI_INVAL_DEST); 10392 10393 DspinRwtParam::dspin_set(flit, 10394 DspinRwtParam::TYPE_MULTI_INVAL_DATA, 10395 DspinRwtParam::M2P_TYPE); 10396 10397 p_dspin_m2p.write = true; 10398 p_dspin_m2p.data = flit; 10399 10400 break; 10401 } 10402 ///////////////////////////////// 10361 10403 case CC_SEND_READ_NCC_INVAL_NLINE: 10362 { 10363 uint64_t flit = 0; 10364 10365 DspinRwtParam::dspin_set( 10366 flit, 10367 r_read_to_cc_send_nline.read(), 10368 DspinRwtParam::MULTI_INVAL_NLINE); 10369 10370 10371 p_dspin_m2p.write = true; 10372 p_dspin_m2p.data = flit; 10373 p_dspin_m2p.eop = true; 10374 10375 break; 10376 10377 } 10378 10404 { 10405 uint64_t flit = 0; 10406 10407 DspinRwtParam::dspin_set(flit, 10408 r_read_to_cc_send_nline.read(), 10409 DspinRwtParam::MULTI_INVAL_NLINE); 10410 10411 p_dspin_m2p.write = true; 10412 p_dspin_m2p.data = flit; 10413 p_dspin_m2p.eop = true; 10414 10415 break; 10416 } 10417 ///////////////////////////////// 10379 10418 case CC_SEND_WRITE_NCC_INVAL_HEADER: 10380 { 10381 uint64_t flit = 0; 10382 10383 DspinRwtParam::dspin_set( 10384 flit, 10385 r_write_to_cc_send_dest.read(), 10386 DspinRwtParam::MULTI_INVAL_DEST); 10387 10388 DspinRwtParam::dspin_set( 10389 flit, 10390 DspinRwtParam::TYPE_MULTI_INVAL_DATA, 10391 DspinRwtParam::M2P_TYPE); 10392 10393 p_dspin_m2p.write = true; 10394 p_dspin_m2p.data = flit; 10395 10396 break; 10397 10398 } 10399 10419 { 10420 uint64_t flit = 0; 10421 10422 DspinRwtParam::dspin_set(flit, 10423 r_write_to_cc_send_dest.read(), 10424 DspinRwtParam::MULTI_INVAL_DEST); 10425 10426 DspinRwtParam::dspin_set(flit, 10427 DspinRwtParam::TYPE_MULTI_INVAL_DATA, 10428 DspinRwtParam::M2P_TYPE); 10429 10430 p_dspin_m2p.write = true; 10431 p_dspin_m2p.data = flit; 10432 10433 break; 10434 } 10435 
///////////////////////////////// 10400 10436 case CC_SEND_WRITE_NCC_INVAL_NLINE: 10401 { 10402 uint64_t flit = 0; 10403 10404 DspinRwtParam::dspin_set( 10405 flit, 10406 r_write_to_cc_send_nline.read(), 10407 DspinRwtParam::MULTI_INVAL_NLINE); 10408 10409 10410 p_dspin_m2p.write = true; 10411 p_dspin_m2p.data = flit; 10412 p_dspin_m2p.eop = true; 10413 10414 break; 10415 10416 } 10417 10418 10437 { 10438 uint64_t flit = 0; 10439 10440 DspinRwtParam::dspin_set(flit, 10441 r_write_to_cc_send_nline.read(), 10442 DspinRwtParam::MULTI_INVAL_NLINE); 10443 10444 p_dspin_m2p.write = true; 10445 p_dspin_m2p.data = flit; 10446 p_dspin_m2p.eop = true; 10447 10448 break; 10449 } 10450 ///////////////////////////////// 10419 10451 case CC_SEND_WRITE_BRDCAST_NLINE: 10420 10421 10422 DspinRwtParam::dspin_set(flit,10423 r_write_to_cc_send_nline.read(),10424 DspinRwtParam::BROADCAST_NLINE);10425 10426 10427 10428 10429 10430 10452 { 10453 uint64_t flit = 0; 10454 DspinRwtParam::dspin_set(flit, 10455 r_write_to_cc_send_nline.read(), 10456 DspinRwtParam::BROADCAST_NLINE); 10457 p_dspin_m2p.write = true; 10458 p_dspin_m2p.eop = true; 10459 p_dspin_m2p.data = flit; 10460 break; 10461 } 10462 /////////////////////////////// 10431 10463 case CC_SEND_CAS_BRDCAST_NLINE: 10432 10433 10434 DspinRwtParam::dspin_set(flit,10435 r_cas_to_cc_send_nline.read(),10436 DspinRwtParam::BROADCAST_NLINE);10437 10438 10439 10440 10441 10442 10464 { 10465 uint64_t flit = 0; 10466 DspinRwtParam::dspin_set(flit, 10467 r_cas_to_cc_send_nline.read(), 10468 DspinRwtParam::BROADCAST_NLINE); 10469 p_dspin_m2p.write = true; 10470 p_dspin_m2p.eop = true; 10471 p_dspin_m2p.data = flit; 10472 break; 10473 } 10474 /////////////////////////////// 10443 10475 case CC_SEND_WRITE_UPDT_HEADER: 10444 { 10445 if(not m_write_to_cc_send_inst_fifo.rok()) break; 10446 10447 uint8_t multi_updt_type; 10448 if(m_write_to_cc_send_inst_fifo.read()) 10449 { 10450 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_INST; 10451 } 10452 else 10453 { 10454 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_DATA; 10455 } 10456 10457 uint64_t flit = 0; 10458 uint64_t dest = 10459 m_write_to_cc_send_srcid_fifo.read() << 10460 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10461 10462 DspinRwtParam::dspin_set( 10463 flit, 10464 dest, 10465 DspinRwtParam::MULTI_UPDT_DEST); 10466 10467 DspinRwtParam::dspin_set( 10468 flit, 10469 r_write_to_cc_send_trdid.read(), 10470 DspinRwtParam::MULTI_UPDT_UPDT_INDEX); 10471 10472 DspinRwtParam::dspin_set( 10473 flit, 10474 multi_updt_type, 10475 DspinRwtParam::M2P_TYPE); 10476 10477 p_dspin_m2p.write = true; 10478 p_dspin_m2p.data = flit; 10479 10480 break; 10481 } 10482 ////////////////////////////// 10476 { 10477 if (not m_write_to_cc_send_inst_fifo.rok()) break; 10478 10479 uint8_t multi_updt_type; 10480 if (m_write_to_cc_send_inst_fifo.read()) 10481 { 10482 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_INST; 10483 } 10484 else 10485 { 10486 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_DATA; 10487 } 10488 10489 uint64_t flit = 0; 10490 uint64_t dest = 10491 m_write_to_cc_send_srcid_fifo.read() << 10492 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10493 10494 DspinRwtParam::dspin_set(flit, 10495 dest, 10496 DspinRwtParam::MULTI_UPDT_DEST); 10497 10498 DspinRwtParam::dspin_set(flit, 10499 r_write_to_cc_send_trdid.read(), 10500 DspinRwtParam::MULTI_UPDT_UPDT_INDEX); 10501 10502 DspinRwtParam::dspin_set(flit, 10503 multi_updt_type, 10504 DspinRwtParam::M2P_TYPE); 10505 10506 p_dspin_m2p.write = true; 10507 p_dspin_m2p.data 
= flit; 10508 10509 break; 10510 } 10511 ////////////////////////////// 10483 10512 case CC_SEND_WRITE_UPDT_NLINE: 10484 { 10485 uint64_t flit = 0; 10486 10487 DspinRwtParam::dspin_set( 10488 flit, 10489 r_write_to_cc_send_index.read(), 10490 DspinRwtParam::MULTI_UPDT_WORD_INDEX); 10491 10492 DspinRwtParam::dspin_set( 10493 flit, 10494 r_write_to_cc_send_nline.read(), 10495 DspinRwtParam::MULTI_UPDT_NLINE); 10496 10497 p_dspin_m2p.write = true; 10498 p_dspin_m2p.data = flit; 10499 10500 break; 10501 } 10502 ///////////////////////////// 10513 { 10514 uint64_t flit = 0; 10515 10516 DspinRwtParam::dspin_set(flit, 10517 r_write_to_cc_send_index.read(), 10518 DspinRwtParam::MULTI_UPDT_WORD_INDEX); 10519 10520 DspinRwtParam::dspin_set(flit, 10521 r_write_to_cc_send_nline.read(), 10522 DspinRwtParam::MULTI_UPDT_NLINE); 10523 10524 p_dspin_m2p.write = true; 10525 p_dspin_m2p.data = flit; 10526 10527 break; 10528 } 10529 ///////////////////////////// 10503 10530 case CC_SEND_WRITE_UPDT_DATA: 10504 { 10505 10506 uint8_t multi_updt_cpt = 10507 r_cc_send_cpt.read() + r_write_to_cc_send_index.read(); 10508 10509 uint8_t multi_updt_be = r_write_to_cc_send_be[multi_updt_cpt].read(); 10510 uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read(); 10511 10512 uint64_t flit = 0; 10513 10514 DspinRwtParam::dspin_set( 10515 flit, 10516 multi_updt_be, 10517 DspinRwtParam::MULTI_UPDT_BE); 10518 10519 DspinRwtParam::dspin_set( 10520 flit, 10521 multi_updt_data, 10522 DspinRwtParam::MULTI_UPDT_DATA); 10523 10524 p_dspin_m2p.write = true; 10525 p_dspin_m2p.eop = (r_cc_send_cpt.read() == r_write_to_cc_send_count.read()); 10526 p_dspin_m2p.data = flit; 10527 10528 break; 10529 } 10530 //////////////////////////// 10531 { 10532 10533 uint8_t multi_updt_cpt = 10534 r_cc_send_cpt.read() + r_write_to_cc_send_index.read(); 10535 10536 uint8_t multi_updt_be = r_write_to_cc_send_be[multi_updt_cpt].read(); 10537 uint32_t multi_updt_data = r_write_to_cc_send_data[multi_updt_cpt].read(); 10538 10539 uint64_t flit = 0; 10540 10541 DspinRwtParam::dspin_set(flit, 10542 multi_updt_be, 10543 DspinRwtParam::MULTI_UPDT_BE); 10544 10545 DspinRwtParam::dspin_set(flit, 10546 multi_updt_data, 10547 DspinRwtParam::MULTI_UPDT_DATA); 10548 10549 p_dspin_m2p.write = true; 10550 p_dspin_m2p.eop = (r_cc_send_cpt.read() == r_write_to_cc_send_count.read()); 10551 p_dspin_m2p.data = flit; 10552 10553 break; 10554 } 10555 //////////////////////////// 10531 10556 case CC_SEND_CAS_UPDT_HEADER: 10532 { 10533 if (not m_cas_to_cc_send_inst_fifo.rok()) break; 10534 10535 uint8_t multi_updt_type; 10536 if(m_cas_to_cc_send_inst_fifo.read()) 10537 { 10538 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_INST; 10539 } 10540 else 10541 { 10542 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_DATA; 10543 } 10544 10545 uint64_t flit = 0; 10546 uint64_t dest = 10547 m_cas_to_cc_send_srcid_fifo.read() << 10548 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10549 10550 DspinRwtParam::dspin_set( 10551 flit, 10552 dest, 10553 DspinRwtParam::MULTI_UPDT_DEST); 10554 10555 DspinRwtParam::dspin_set( 10556 flit, 10557 r_cas_to_cc_send_trdid.read(), 10558 DspinRwtParam::MULTI_UPDT_UPDT_INDEX); 10559 10560 DspinRwtParam::dspin_set( 10561 flit, 10562 multi_updt_type, 10563 DspinRwtParam::M2P_TYPE); 10564 10565 p_dspin_m2p.write = true; 10566 p_dspin_m2p.data = flit; 10567 10568 break; 10569 } 10570 //////////////////////////// 10557 { 10558 if (not m_cas_to_cc_send_inst_fifo.rok()) break; 10559 10560 uint8_t multi_updt_type; 10561 if 
(m_cas_to_cc_send_inst_fifo.read()) 10562 { 10563 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_INST; 10564 } 10565 else 10566 { 10567 multi_updt_type = DspinRwtParam::TYPE_MULTI_UPDT_DATA; 10568 } 10569 10570 uint64_t flit = 0; 10571 uint64_t dest = 10572 m_cas_to_cc_send_srcid_fifo.read() << 10573 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10574 10575 DspinRwtParam::dspin_set(flit, 10576 dest, 10577 DspinRwtParam::MULTI_UPDT_DEST); 10578 10579 DspinRwtParam::dspin_set(flit, 10580 r_cas_to_cc_send_trdid.read(), 10581 DspinRwtParam::MULTI_UPDT_UPDT_INDEX); 10582 10583 DspinRwtParam::dspin_set(flit, 10584 multi_updt_type, 10585 DspinRwtParam::M2P_TYPE); 10586 10587 p_dspin_m2p.write = true; 10588 p_dspin_m2p.data = flit; 10589 10590 break; 10591 } 10592 //////////////////////////// 10571 10593 case CC_SEND_CAS_UPDT_NLINE: 10572 { 10573 uint64_t flit = 0; 10574 10575 DspinRwtParam::dspin_set( 10576 flit, 10577 r_cas_to_cc_send_index.read(), 10578 DspinRwtParam::MULTI_UPDT_WORD_INDEX); 10579 10580 DspinRwtParam::dspin_set( 10581 flit, 10582 r_cas_to_cc_send_nline.read(), 10583 DspinRwtParam::MULTI_UPDT_NLINE); 10584 10585 p_dspin_m2p.write = true; 10586 p_dspin_m2p.data = flit; 10587 10588 break; 10589 } 10590 /////////////////////////// 10594 { 10595 uint64_t flit = 0; 10596 10597 DspinRwtParam::dspin_set(flit, 10598 r_cas_to_cc_send_index.read(), 10599 DspinRwtParam::MULTI_UPDT_WORD_INDEX); 10600 10601 DspinRwtParam::dspin_set(flit, 10602 r_cas_to_cc_send_nline.read(), 10603 DspinRwtParam::MULTI_UPDT_NLINE); 10604 10605 p_dspin_m2p.write = true; 10606 p_dspin_m2p.data = flit; 10607 10608 break; 10609 } 10610 /////////////////////////// 10591 10611 case CC_SEND_CAS_UPDT_DATA: 10592 { 10593 uint64_t flit = 0; 10594 10595 DspinRwtParam::dspin_set( 10596 flit, 10597 0xF, 10598 DspinRwtParam::MULTI_UPDT_BE); 10599 10600 DspinRwtParam::dspin_set( 10601 flit, 10602 r_cas_to_cc_send_wdata.read(), 10603 DspinRwtParam::MULTI_UPDT_DATA); 10604 10605 p_dspin_m2p.write = true; 10606 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 10607 p_dspin_m2p.data = flit; 10608 10609 break; 10610 } 10611 //////////////////////////////// 10612 { 10613 uint64_t flit = 0; 10614 10615 DspinRwtParam::dspin_set(flit, 10616 0xF, 10617 DspinRwtParam::MULTI_UPDT_BE); 10618 10619 DspinRwtParam::dspin_set(flit, 10620 r_cas_to_cc_send_wdata.read(), 10621 DspinRwtParam::MULTI_UPDT_DATA); 10622 10623 p_dspin_m2p.write = true; 10624 p_dspin_m2p.eop = not r_cas_to_cc_send_is_long.read(); 10625 p_dspin_m2p.data = flit; 10626 10627 break; 10628 } 10629 //////////////////////////////// 10612 10630 case CC_SEND_CAS_UPDT_DATA_HIGH: 10613 { 10614 uint64_t flit = 0; 10615 10616 DspinRwtParam::dspin_set( 10617 flit, 10618 0xF, 10619 DspinRwtParam::MULTI_UPDT_BE); 10620 10621 DspinRwtParam::dspin_set( 10622 flit, 10623 r_cas_to_cc_send_wdata_high.read(), 10624 DspinRwtParam::MULTI_UPDT_DATA); 10625 10626 p_dspin_m2p.write = true; 10627 p_dspin_m2p.eop = true; 10628 p_dspin_m2p.data = flit; 10629 10630 break; 10631 } 10631 { 10632 uint64_t flit = 0; 10633 10634 DspinRwtParam::dspin_set(flit, 10635 0xF, 10636 DspinRwtParam::MULTI_UPDT_BE); 10637 10638 DspinRwtParam::dspin_set(flit, 10639 r_cas_to_cc_send_wdata_high.read(), 10640 DspinRwtParam::MULTI_UPDT_DATA); 10641 10642 p_dspin_m2p.write = true; 10643 p_dspin_m2p.eop = true; 10644 p_dspin_m2p.data = flit; 10645 10646 break; 10647 } 10632 10648 } 10633 10649 … … 10636 10652 //////////////////////////////////////////////////////////////////// 10637 10653 10638 if ( 
r_cleanup_fsm.read() == CLEANUP_SEND_CLACK)10654 if (r_cleanup_fsm.read() == CLEANUP_SEND_CLACK) 10639 10655 { 10640 10656 uint8_t cleanup_ack_type; 10641 if (r_cleanup_inst.read())10657 if (r_cleanup_inst.read()) 10642 10658 { 10643 10659 cleanup_ack_type = DspinRwtParam::TYPE_CLACK_INST; … … 10652 10668 (DspinRwtParam::SRCID_WIDTH - vci_param_int::S); 10653 10669 10654 DspinRwtParam::dspin_set( 10655 flit, 10656 dest, 10657 DspinRwtParam::CLACK_DEST); 10658 10659 DspinRwtParam::dspin_set( 10660 flit, 10661 r_cleanup_nline.read() & 0xFFFF, 10662 DspinRwtParam::CLACK_SET); 10663 10664 DspinRwtParam::dspin_set( 10665 flit, 10666 r_cleanup_way_index.read(), 10667 DspinRwtParam::CLACK_WAY); 10668 10669 DspinRwtParam::dspin_set( 10670 flit, 10671 cleanup_ack_type, 10672 DspinRwtParam::CLACK_TYPE); 10670 DspinRwtParam::dspin_set(flit, 10671 dest, 10672 DspinRwtParam::CLACK_DEST); 10673 10674 DspinRwtParam::dspin_set(flit, 10675 r_cleanup_nline.read() & 0xFFFF, 10676 DspinRwtParam::CLACK_SET); 10677 10678 DspinRwtParam::dspin_set(flit, 10679 r_cleanup_way_index.read(), 10680 DspinRwtParam::CLACK_WAY); 10681 10682 DspinRwtParam::dspin_set(flit, 10683 cleanup_ack_type, 10684 DspinRwtParam::CLACK_TYPE); 10673 10685 10674 10686 p_dspin_clack.eop = true; … … 10682 10694 p_dspin_clack.data = 0; 10683 10695 } 10696 10684 10697 /////////////////////////////////////////////////////////////////// 10685 10698 // p_dspin_p2m port (CC_RECEIVE FSM) 10686 10699 /////////////////////////////////////////////////////////////////// 10687 10700 // 10688 switch (r_cc_receive_fsm.read())10701 switch (r_cc_receive_fsm.read()) 10689 10702 { 10690 10703 case CC_RECEIVE_IDLE: 10691 10692 10693 10694 10704 { 10705 p_dspin_p2m.read = false; 10706 break; 10707 } 10695 10708 case CC_RECEIVE_CLEANUP: 10696 case CC_RECEIVE_CLEANUP_EOP: 10697 { 10698 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok(); 10699 break; 10700 } 10709 { 10710 p_dspin_p2m.read = m_cc_receive_to_cleanup_fifo.wok(); 10711 break; 10712 } 10701 10713 case CC_RECEIVE_MULTI_ACK: 10702 10703 10704 10705 10714 { 10715 p_dspin_p2m.read = m_cc_receive_to_multi_ack_fifo.wok(); 10716 break; 10717 } 10706 10718 } 10707 10719 // end switch r_cc_send_fsm
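A note on the flit construction pattern that appears throughout the CC_SEND and CLEANUP output logic above: each DspinRwtParam::dspin_set(flit, value, FIELD) call packs one field (destination, update index, M2P type, nline, ...) into a 64-bit DSPIN flit before the flit is written to p_dspin_m2p.data or p_dspin_clack.data. The standalone C++ sketch below illustrates that shift-and-mask packing; the helper name flit_set and the field positions and widths are hypothetical placeholders for illustration only, not the real DspinRwtParam layout, which is defined elsewhere in the RWT branch.

#include <cstdint>
#include <cassert>
#include <iostream>

// Hypothetical field descriptor: position and width inside a 64-bit flit.
struct FlitField
{
    unsigned lsb;    // bit position of the least significant bit
    unsigned width;  // field width in bits
};

// Example layout (illustrative only, not the real DSPIN M2P format).
static const FlitField MULTI_INVAL_DEST  = {44, 14};
static const FlitField M2P_TYPE          = {40,  4};
static const FlitField MULTI_INVAL_NLINE = { 0, 40};

// Sketch of what a dspin_set()-style helper does:
// insert 'value' into 'field' of 'flit', leaving the other bits untouched.
inline void flit_set(uint64_t & flit, uint64_t value, const FlitField & field)
{
    const uint64_t mask = (field.width == 64) ? ~0ULL : ((1ULL << field.width) - 1);
    assert((value & ~mask) == 0 && "value does not fit in the field");
    flit = (flit & ~(mask << field.lsb)) | ((value & mask) << field.lsb);
}

int main()
{
    uint64_t flit = 0;
    flit_set(flit, 0x2A,       MULTI_INVAL_DEST);   // destination SRCID
    flit_set(flit, 0x3,        M2P_TYPE);           // command type
    flit_set(flit, 0x12345678, MULTI_INVAL_NLINE);  // cache line number
    std::cout << std::hex << flit << std::endl;
    return 0;
}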
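The command FIFOs in the transition function above (m_cmd_write_*, m_cmd_cas_*, m_*_to_cc_send_*) are all driven through update(get, put, data), while m_cc_receive_to_cleanup_fifo is driven explicitly through simple_put() / simple_get() / put_and_get(). The sketch below is a toy circular-buffer FIFO showing the intended single-cycle semantics of those calls; it assumes a fixed depth and is only an illustration, not the soclib GenericFifo implementation actually used by the component.

#include <cstdint>
#include <cstddef>
#include <vector>
#include <cassert>
#include <iostream>

// Minimal circular-buffer FIFO mimicking the get/put interface used above.
template <typename T>
class ToyFifo
{
    std::vector<T> m_buf;
    size_t m_rptr, m_wptr, m_count;
public:
    explicit ToyFifo(size_t depth) : m_buf(depth), m_rptr(0), m_wptr(0), m_count(0) {}

    bool rok() const { return m_count > 0; }            // data available
    bool wok() const { return m_count < m_buf.size(); } // free slot available
    T    read() const { assert(rok()); return m_buf[m_rptr]; }

    void simple_put(const T & d)            // push only
    {
        assert(wok());
        m_buf[m_wptr] = d;
        m_wptr = (m_wptr + 1) % m_buf.size();
        m_count++;
    }
    void simple_get()                       // pop only
    {
        assert(rok());
        m_rptr = (m_rptr + 1) % m_buf.size();
        m_count--;
    }
    void put_and_get(const T & d)           // pop the head and push a new item
    {
        simple_get();
        simple_put(d);
    }
    // One-call form used for the command FIFOs: apply get and/or put this cycle.
    void update(bool get, bool put, const T & d)
    {
        if (get and put) put_and_get(d);
        else if (get)    simple_get();
        else if (put)    simple_put(d);
    }
};

int main()
{
    ToyFifo<uint32_t> fifo(4);
    fifo.update(false, true, 0xCAFE);  // put only
    fifo.update(true,  true, 0xBEEF);  // pop 0xCAFE, push 0xBEEF
    std::cout << std::hex << fifo.read() << std::endl;  // prints beef
    return 0;
}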