1 | /* -*- c++ -*- |
---|
2 | * File : vci_mem_cache.cpp |
---|
3 | * Date : 30/10/2008 |
---|
4 | * Copyright : UPMC / LIP6 |
---|
5 | * Authors : Alain Greiner / Eric Guthmuller |
---|
6 | * |
---|
7 | * SOCLIB_LGPL_HEADER_BEGIN |
---|
8 | * |
---|
9 | * This file is part of SoCLib, GNU LGPLv2.1. |
---|
10 | * |
---|
11 | * SoCLib is free software; you can redistribute it and/or modify it |
---|
12 | * under the terms of the GNU Lesser General Public License as published |
---|
13 | * by the Free Software Foundation; version 2.1 of the License. |
---|
14 | * |
---|
15 | * SoCLib is distributed in the hope that it will be useful, but |
---|
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
---|
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
---|
18 | * Lesser General Public License for more details. |
---|
19 | * |
---|
20 | * You should have received a copy of the GNU Lesser General Public |
---|
21 | * License along with SoCLib; if not, write to the Free Software |
---|
22 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA |
---|
23 | * 02110-1301 USA |
---|
24 | * |
---|
25 | * SOCLIB_LGPL_HEADER_END |
---|
26 | * |
---|
27 | * Maintainers: alain eric.guthmuller@polytechnique.edu |
---|
28 | */ |
---|
29 | |
---|
30 | #include "../include/vci_mem_cache.h" |
---|
31 | |
---|
32 | #define DEBUG_VCI_MEM_CACHE 0 |
---|
33 | |
---|
34 | namespace soclib { namespace caba { |
---|
35 | |
---|
36 | #ifdef DEBUG_VCI_MEM_CACHE |
---|
// State-name tables used by the debug trace in transition() (printed when
// DEBUG_VCI_MEM_CACHE is non-zero). Entry order must match the state
// encodings declared in vci_mem_cache.h, since each FSM register value is
// used directly as an index into these arrays.
// NOTE(review): the surrounding guard is "#ifdef DEBUG_VCI_MEM_CACHE", which
// is always true because the macro is #define'd (to 0) at the top of this
// file; "#if DEBUG_VCI_MEM_CACHE" would match how the tables are used below.
const char *tgt_cmd_fsm_str[] = {
    "TGT_CMD_IDLE",
    "TGT_CMD_READ",
    "TGT_CMD_READ_EOP",
    "TGT_CMD_WRITE",
    "TGT_CMD_ATOMIC",
    "TGT_CMD_CLEANUP",
};
const char *tgt_rsp_fsm_str[] = {
    "TGT_RSP_READ_IDLE",
    "TGT_RSP_WRITE_IDLE",
    "TGT_RSP_LLSC_IDLE",
    "TGT_RSP_CLEANUP_IDLE",
    "TGT_RSP_XRAM_IDLE",
    "TGT_RSP_INIT_IDLE",
    "TGT_RSP_READ_TEST",
    "TGT_RSP_READ_WORD",
    "TGT_RSP_READ_LINE",
    "TGT_RSP_WRITE",
    "TGT_RSP_LLSC",
    "TGT_RSP_CLEANUP",
    "TGT_RSP_XRAM_TEST",
    "TGT_RSP_XRAM_WORD",
    "TGT_RSP_XRAM_LINE",
    "TGT_RSP_INIT",
};
const char *init_cmd_fsm_str[] = {
    "INIT_CMD_INVAL_IDLE",
    "INIT_CMD_INVAL_SEL",
    "INIT_CMD_INVAL_NLINE",
    "INIT_CMD_UPDT_IDLE",
    "INIT_CMD_UPDT_SEL",
    "INIT_CMD_UPDT_NLINE",
    "INIT_CMD_UPDT_INDEX",
    "INIT_CMD_UPDT_DATA",
};
const char *init_rsp_fsm_str[] = {
    "INIT_RSP_IDLE",
    "INIT_RSP_UPT_LOCK",
    "INIT_RSP_UPT_CLEAR",
    "INIT_RSP_END",
};
const char *read_fsm_str[] = {
    "READ_IDLE",
    "READ_DIR_LOCK",
    "READ_DIR_HIT",
    "READ_RSP",
    "READ_TRT_LOCK",
    "READ_TRT_SET",
    "READ_XRAM_REQ",
};
const char *write_fsm_str[] = {
    "WRITE_IDLE",
    "WRITE_NEXT",   // FIX: a missing comma here used to concatenate this
                    // literal with "WRITE_DIR_LOCK", dropping one entry and
                    // shifting every later state name in the debug trace
    "WRITE_DIR_LOCK",
    "WRITE_DIR_HIT_READ",
    "WRITE_DIR_HIT",
    "WRITE_UPT_LOCK",
    "WRITE_WAIT_UPT",
    "WRITE_UPDATE",
    "WRITE_RSP",
    "WRITE_TRT_LOCK",
    "WRITE_TRT_DATA",
    "WRITE_TRT_SET",
    "WRITE_WAIT_TRT",
    "WRITE_XRAM_REQ",
};
const char *ixr_rsp_fsm_str[] = {
    "IXR_RSP_IDLE",
    "IXR_RSP_ACK",
    "IXR_RSP_TRT_ERASE",
    "IXR_RSP_TRT_READ",
};
const char *xram_rsp_fsm_str[] = {
    "XRAM_RSP_IDLE",
    "XRAM_RSP_TRT_COPY",
    "XRAM_RSP_TRT_DIRTY",
    "XRAM_RSP_DIR_LOCK",
    "XRAM_RSP_DIR_UPDT",
    "XRAM_RSP_DIR_RSP",
    "XRAM_RSP_UPT_LOCK",
    "XRAM_RSP_WAIT",
    "XRAM_RSP_INVAL",
    "XRAM_RSP_WRITE_DIRTY",
};
const char *xram_cmd_fsm_str[] = {
    "XRAM_CMD_READ_IDLE",
    "XRAM_CMD_WRITE_IDLE",
    "XRAM_CMD_LLSC_IDLE",
    "XRAM_CMD_XRAM_IDLE",
    "XRAM_CMD_READ_NLINE",
    "XRAM_CMD_WRITE_NLINE",
    "XRAM_CMD_LLSC_NLINE",
    "XRAM_CMD_XRAM_DATA",
};
const char *llsc_fsm_str[] = {
    "LLSC_IDLE",
    "LL_DIR_LOCK",
    "LL_DIR_HIT",
    "LL_RSP",
    "SC_DIR_LOCK",
    "SC_DIR_HIT",
    "SC_RSP_FALSE",
    "SC_RSP_TRUE",
    "LLSC_TRT_LOCK",
    "LLSC_TRT_SET",
    "LLSC_XRAM_REQ",
};
const char *cleanup_fsm_str[] = {
    "CLEANUP_IDLE",
    "CLEANUP_DIR_LOCK",
    "CLEANUP_DIR_WRITE",
    "CLEANUP_RSP",
};
const char *alloc_dir_fsm_str[] = {
    "ALLOC_DIR_READ",
    "ALLOC_DIR_WRITE",
    "ALLOC_DIR_LLSC",
    "ALLOC_DIR_CLEANUP",
    "ALLOC_DIR_XRAM_RSP",
};
const char *alloc_trt_fsm_str[] = {
    "ALLOC_TRT_READ",
    "ALLOC_TRT_WRITE",
    "ALLOC_TRT_LLSC",
    "ALLOC_TRT_XRAM_RSP",
    "ALLOC_TRT_IXR_RSP",
};
const char *alloc_upt_fsm_str[] = {
    "ALLOC_UPT_WRITE",
    "ALLOC_UPT_XRAM_RSP",
    "ALLOC_UPT_INIT_RSP",
};
---|
170 | #endif |
---|
171 | |
---|
172 | #define tmpl(x) template<typename vci_param> x VciMemCache<vci_param> |
---|
173 | |
---|
174 | using soclib::common::uint32_log2; |
---|
175 | |
---|
176 | //////////////////////////////// |
---|
177 | // Constructor |
---|
178 | //////////////////////////////// |
---|
179 | |
---|
// Constructor.
// - mtp / mtc / mtx : mapping tables; mtp provides this target's segment
//   list, mtc provides the initiator index on the coherence network and the
//   coherence table, mtx provides the initiator index toward the XRAM
//   (presumably direct / coherence / external-RAM networks — TODO confirm
//   against the platform top-cell).
// - vci_*_index : IntTab identifiers resolved through the tables above.
// - nways / nsets / nwords : cache geometry (all must be powers of 2).
tmpl(/**/)::VciMemCache(
    sc_module_name name,
    const soclib::common::MappingTable &mtp,
    const soclib::common::MappingTable &mtc,
    const soclib::common::MappingTable &mtx,
    const soclib::common::IntTab &vci_ixr_index,
    const soclib::common::IntTab &vci_ini_index,
    const soclib::common::IntTab &vci_tgt_index,
    size_t nways,
    size_t nsets,
    size_t nwords)

    : soclib::caba::BaseModule(name),

    p_clk("clk"),
    p_resetn("resetn"),
    p_vci_tgt("vci_tgt"),
    p_vci_ini("vci_ini"),

    m_initiators( 32 ),                          // hard limit: SRCID is checked against 32 in TGT_CMD_IDLE
    m_ways( nways ),
    m_sets( nsets ),
    m_words( nwords ),
    m_srcid_ixr( mtx.indexForId(vci_ixr_index) ),
    m_srcid_ini( mtc.indexForId(vci_ini_index) ),
    //m_mem_segment("bidon",0,0,soclib::common::IntTab(),false),
    m_seglist(mtp.getSegmentList(vci_tgt_index)),
    m_reg_segment("bidon",0,0,soclib::common::IntTab(),false),    // placeholder, overwritten in the body below
    m_coherence_table( mtc.getCoherenceTable() ),
    m_atomic_tab( m_initiators ),
    m_transaction_tab( TRANSACTION_TAB_LINES, nwords ),
    m_update_tab( UPDATE_TAB_LINES ),
    m_cache_directory( nways, nsets, nwords, vci_param::N ),
    nseg(0),
    // Address decomposition helpers: word index, set index, tag, line number.
#define L2 soclib::common::uint32_log2
    m_x( L2(m_words), 2),
    m_y( L2(m_sets), L2(m_words) + 2),
    m_z( vci_param::N - L2(m_sets) - L2(m_words) - 2, L2(m_sets) + L2(m_words) + 2),
    m_nline( vci_param::N - L2(m_words) - 2, L2(m_words) + 2),
#undef L2

    // FIFOs
    m_cmd_read_addr_fifo("m_cmd_read_addr_fifo", 4),
    m_cmd_read_word_fifo("m_cmd_read_word_fifo", 4),
    m_cmd_read_srcid_fifo("m_cmd_read_srcid_fifo", 4),
    m_cmd_read_trdid_fifo("m_cmd_read_trdid_fifo", 4),
    m_cmd_read_pktid_fifo("m_cmd_read_pktid_fifo", 4),

    // write FIFOs are deeper (8) because a write burst can be up to 16 cells
    m_cmd_write_addr_fifo("m_cmd_write_addr_fifo",8),
    m_cmd_write_eop_fifo("m_cmd_write_eop_fifo",8),
    m_cmd_write_srcid_fifo("m_cmd_write_srcid_fifo",8),
    m_cmd_write_trdid_fifo("m_cmd_write_trdid_fifo",8),
    m_cmd_write_pktid_fifo("m_cmd_write_pktid_fifo",8),
    m_cmd_write_data_fifo("m_cmd_write_data_fifo",8),
    m_cmd_write_be_fifo("m_cmd_write_be_fifo",8),

    m_cmd_llsc_addr_fifo("m_cmd_llsc_addr_fifo",4),
    m_cmd_llsc_sc_fifo("m_cmd_llsc_sc_fifo",4),
    m_cmd_llsc_srcid_fifo("m_cmd_llsc_srcid_fifo",4),
    m_cmd_llsc_trdid_fifo("m_cmd_llsc_trdid_fifo",4),
    m_cmd_llsc_pktid_fifo("m_cmd_llsc_pktid_fifo",4),
    m_cmd_llsc_wdata_fifo("m_cmd_llsc_wdata_fifo",4),

    m_cmd_cleanup_srcid_fifo("m_cmd_cleanup_srcid_fifo",4),
    m_cmd_cleanup_trdid_fifo("m_cmd_cleanup_trdid_fifo",4),
    m_cmd_cleanup_pktid_fifo("m_cmd_cleanup_pktid_fifo",4),
    m_cmd_cleanup_nline_fifo("m_cmd_cleanup_nline_fifo",4),

    r_tgt_cmd_fsm("r_tgt_cmd_fsm"),
    r_read_fsm("r_read_fsm"),
    r_write_fsm("r_write_fsm"),
    r_init_rsp_fsm("r_init_rsp_fsm"),
    r_cleanup_fsm("r_cleanup_fsm"),
    r_llsc_fsm("r_llsc_fsm"),
    r_ixr_rsp_fsm("r_ixr_rsp_fsm"),
    r_xram_rsp_fsm("r_xram_rsp_fsm"),
    r_xram_cmd_fsm("r_xram_cmd_fsm"),
    r_tgt_rsp_fsm("r_tgt_rsp_fsm"),
    r_init_cmd_fsm("r_init_cmd_fsm"),
    r_alloc_dir_fsm("r_alloc_dir_fsm"),
    r_alloc_trt_fsm("r_alloc_trt_fsm"),
    r_alloc_upt_fsm("r_alloc_upt_fsm")
{
    // The address decomposition above relies on power-of-2 geometry.
    assert(IS_POW_OF_2(nsets));
    assert(IS_POW_OF_2(nwords));
    assert(IS_POW_OF_2(nways));
    assert(nsets);
    assert(nwords);
    assert(nways);
    assert(nsets <= 1024);
    assert(nwords <= 32);
    assert(nways <= 32);

    // Get the segments associated to the MemCache
    //std::list<soclib::common::Segment> segList(mtp.getSegmentList(vci_tgt_index));
    std::list<soclib::common::Segment>::iterator seg;
    /*
    for(seg = segList.begin(); seg != segList.end() ; seg++) {
        if( seg->size() > 8 ) m_mem_segment = *seg;
        else m_reg_segment = *seg;
        nseg++;
    }
    */

    // First pass: count the memory segments (size > 8 bytes) so the pointer
    // array below can be allocated exactly.
    for(seg = m_seglist.begin(); seg != m_seglist.end() ; seg++) {
        if( seg->size() > 8 ) nseg++;
    }
    //assert( (nseg == 2) && (m_reg_segment.size() == 8) );

    // Second pass: memory segments are kept by pointer into m_seglist;
    // the single 8-byte segment is the CLEANUP register segment.
    m_seg = new soclib::common::Segment*[nseg];
    size_t i = 0;
    for ( seg = m_seglist.begin() ; seg != m_seglist.end() ; seg++ ) {
        if ( seg->size() > 8 )
        {
            m_seg[i] = &(*seg);
            i++;
        }
        else
        {
            m_reg_segment = *seg;
        }
    }

    // Exactly one 8-byte register segment must have been found.
    assert( (m_reg_segment.size() == 8) );

    // Memory cache allocation & initialisation
    // Data array: m_cache_data[way][set][word], zero-initialised.
    m_cache_data = new data_t**[nways];
    for ( size_t i=0 ; i<nways ; ++i ) {
        m_cache_data[i] = new data_t*[nsets];
    }
    for ( size_t i=0; i<nways; ++i ) {
        for ( size_t j=0; j<nsets; ++j ) {
            m_cache_data[i][j] = new data_t[nwords];
            for ( size_t k=0; k<nwords; k++){
                m_cache_data[i][j][k]=0;
            }
        }
    }

    // Allocation for IXR_RSP FSM
    r_ixr_rsp_to_xram_rsp_rok = new sc_signal<bool>[TRANSACTION_TAB_LINES];

    // Allocation for XRAM_RSP FSM
    r_xram_rsp_victim_data = new sc_signal<data_t>[nwords];
    r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords];
    r_xram_rsp_to_tgt_rsp_val = new sc_signal<bool>[nwords];
    r_xram_rsp_to_xram_cmd_data = new sc_signal<data_t>[nwords];

    // Allocation for READ FSM
    r_read_data = new sc_signal<data_t>[nwords];
    r_read_to_tgt_rsp_data = new sc_signal<data_t>[nwords];
    r_read_to_tgt_rsp_val = new sc_signal<bool>[nwords];

    // Allocation for WRITE FSM
    r_write_data = new sc_signal<data_t>[nwords];
    r_write_be = new sc_signal<be_t>[nwords];
    r_write_to_init_cmd_data = new sc_signal<data_t>[nwords];
    r_write_to_init_cmd_we = new sc_signal<bool>[nwords];

    // Simulation

    // transition() samples inputs on the rising clock edge;
    // genMoore() drives outputs on the falling edge.
    SC_METHOD(transition);
    dont_initialize();
    sensitive << p_clk.pos();

    SC_METHOD(genMoore);
    dont_initialize();
    sensitive << p_clk.neg();

} // end constructor
---|
350 | |
---|
351 | ///////////////////////////////////////// |
---|
352 | // This function prints the statistics |
---|
353 | ///////////////////////////////////////// |
---|
354 | |
---|
355 | tmpl(void)::print_stats() |
---|
356 | { |
---|
357 | std::cout << "----------------------------------" << std::dec << std::endl; |
---|
358 | std::cout << "MEM_CACHE " << m_srcid_ini << " / Time = " << m_cpt_cycles << std::endl |
---|
359 | << "- READ RATE = " << (float)m_cpt_read/m_cpt_cycles << std::endl |
---|
360 | << "- READ MISS RATE = " << (float)m_cpt_read_miss/m_cpt_read << std::endl |
---|
361 | << "- WRITE RATE = " << (float)m_cpt_write/m_cpt_cycles << std::endl |
---|
362 | << "- WRITE MISS RATE = " << (float)m_cpt_write_miss/m_cpt_write << std::endl |
---|
363 | << "- WRITE BURST LENGTH = " << (float)m_cpt_write_cells/m_cpt_write << std::endl |
---|
364 | << "- UPDATE RATE = " << (float)m_cpt_update/m_cpt_cycles << std::endl |
---|
365 | << "- UPDATE ARITY = " << (float)m_cpt_update_mult/m_cpt_update << std::endl |
---|
366 | << "- INVAL RATE = " << (float)m_cpt_inval/m_cpt_cycles << std::endl |
---|
367 | << "- INVAL ARITY = " << (float)m_cpt_inval_mult/m_cpt_inval << std::endl |
---|
368 | << "- SAVE DIRTY RATE = " << (float)m_cpt_write_dirty/m_cpt_cycles << std::endl |
---|
369 | << "- CLEANUP RATE = " << (float)m_cpt_cleanup/m_cpt_cycles << std::endl |
---|
370 | << "- LL RATE = " << (float)m_cpt_ll/m_cpt_cycles << std::endl |
---|
371 | << "- SC RATE = " << (float)m_cpt_sc/m_cpt_cycles << std::endl; |
---|
372 | } |
---|
373 | |
---|
374 | ///////////////////////////////// |
---|
375 | tmpl(/**/)::~VciMemCache() |
---|
376 | ///////////////////////////////// |
---|
377 | { |
---|
378 | for(size_t i=0; i<m_ways ; i++){ |
---|
379 | for(size_t j=0; j<m_sets ; j++){ |
---|
380 | delete [] m_cache_data[i][j]; |
---|
381 | } |
---|
382 | } |
---|
383 | for(size_t i=0; i<m_ways ; i++){ |
---|
384 | delete [] m_cache_data[i]; |
---|
385 | } |
---|
386 | delete [] m_cache_data; |
---|
387 | delete [] m_coherence_table; |
---|
388 | |
---|
389 | delete [] r_ixr_rsp_to_xram_rsp_rok; |
---|
390 | |
---|
391 | delete [] r_xram_rsp_victim_data; |
---|
392 | delete [] r_xram_rsp_to_tgt_rsp_data; |
---|
393 | delete [] r_xram_rsp_to_tgt_rsp_val; |
---|
394 | delete [] r_xram_rsp_to_xram_cmd_data; |
---|
395 | |
---|
396 | delete [] r_read_data; |
---|
397 | delete [] r_read_to_tgt_rsp_data; |
---|
398 | delete [] r_read_to_tgt_rsp_val; |
---|
399 | |
---|
400 | delete [] r_write_data; |
---|
401 | delete [] r_write_be; |
---|
402 | delete [] r_write_to_init_cmd_data; |
---|
403 | } |
---|
404 | |
---|
405 | ////////////////////////////////// |
---|
406 | tmpl(void)::transition() |
---|
407 | ////////////////////////////////// |
---|
408 | { |
---|
409 | using soclib::common::uint32_log2; |
---|
410 | // RESET |
---|
411 | if ( ! p_resetn.read() ) { |
---|
412 | |
---|
413 | // Initializing FSMs |
---|
414 | r_tgt_cmd_fsm = TGT_CMD_IDLE; |
---|
415 | r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; |
---|
416 | r_init_cmd_fsm = INIT_CMD_INVAL_IDLE; |
---|
417 | r_init_rsp_fsm = INIT_RSP_IDLE; |
---|
418 | r_read_fsm = READ_IDLE; |
---|
419 | r_write_fsm = WRITE_IDLE; |
---|
420 | r_llsc_fsm = LLSC_IDLE; |
---|
421 | r_cleanup_fsm = CLEANUP_IDLE; |
---|
422 | r_alloc_dir_fsm = ALLOC_DIR_READ; |
---|
423 | r_alloc_trt_fsm = ALLOC_TRT_READ; |
---|
424 | r_alloc_upt_fsm = ALLOC_UPT_WRITE; |
---|
425 | r_ixr_rsp_fsm = IXR_RSP_IDLE; |
---|
426 | r_xram_rsp_fsm = XRAM_RSP_IDLE; |
---|
427 | r_xram_cmd_fsm = XRAM_CMD_READ_IDLE; |
---|
428 | |
---|
429 | // Initializing Tables |
---|
430 | m_cache_directory.init(); |
---|
431 | m_atomic_tab.init(); |
---|
432 | m_transaction_tab.init(); |
---|
433 | |
---|
434 | // initializing FIFOs and communication Buffers |
---|
435 | |
---|
436 | m_cmd_read_addr_fifo.init(); |
---|
437 | m_cmd_read_word_fifo.init(); |
---|
438 | m_cmd_read_srcid_fifo.init(); |
---|
439 | m_cmd_read_trdid_fifo.init(); |
---|
440 | m_cmd_read_pktid_fifo.init(); |
---|
441 | |
---|
442 | m_cmd_write_addr_fifo.init(); |
---|
443 | m_cmd_write_eop_fifo.init(); |
---|
444 | m_cmd_write_srcid_fifo.init(); |
---|
445 | m_cmd_write_trdid_fifo.init(); |
---|
446 | m_cmd_write_pktid_fifo.init(); |
---|
447 | m_cmd_write_data_fifo.init(); |
---|
448 | |
---|
449 | m_cmd_llsc_addr_fifo.init(); |
---|
450 | m_cmd_llsc_srcid_fifo.init(); |
---|
451 | m_cmd_llsc_trdid_fifo.init(); |
---|
452 | m_cmd_llsc_pktid_fifo.init(); |
---|
453 | m_cmd_llsc_wdata_fifo.init(); |
---|
454 | m_cmd_llsc_sc_fifo.init(); |
---|
455 | |
---|
456 | m_cmd_cleanup_srcid_fifo.init(); |
---|
457 | m_cmd_cleanup_trdid_fifo.init(); |
---|
458 | m_cmd_cleanup_pktid_fifo.init(); |
---|
459 | m_cmd_cleanup_nline_fifo.init(); |
---|
460 | |
---|
461 | r_read_to_tgt_rsp_req = false; |
---|
462 | r_read_to_xram_cmd_req = false; |
---|
463 | |
---|
464 | r_write_to_tgt_rsp_req = false; |
---|
465 | r_write_to_xram_cmd_req = false; |
---|
466 | r_write_to_init_cmd_req = false; |
---|
467 | |
---|
468 | r_init_rsp_to_tgt_rsp_req = false; |
---|
469 | |
---|
470 | r_cleanup_to_tgt_rsp_req = false; |
---|
471 | |
---|
472 | r_llsc_to_tgt_rsp_req = false; |
---|
473 | r_llsc_to_xram_cmd_req = false; |
---|
474 | |
---|
475 | for(size_t i=0; i<TRANSACTION_TAB_LINES ; i++){ |
---|
476 | r_ixr_rsp_to_xram_rsp_rok[i]= false; |
---|
477 | } |
---|
478 | |
---|
479 | r_xram_rsp_to_tgt_rsp_req = false; |
---|
480 | r_xram_rsp_to_init_cmd_req = false; |
---|
481 | r_xram_rsp_to_xram_cmd_req = false; |
---|
482 | r_xram_rsp_trt_index = 0; |
---|
483 | |
---|
484 | r_xram_cmd_cpt = 0; |
---|
485 | |
---|
486 | // Activity counters |
---|
487 | m_cpt_cycles = 0; |
---|
488 | m_cpt_read = 0; |
---|
489 | m_cpt_read_miss = 0; |
---|
490 | m_cpt_write = 0; |
---|
491 | m_cpt_write_miss = 0; |
---|
492 | m_cpt_write_cells = 0; |
---|
493 | m_cpt_write_dirty = 0; |
---|
494 | m_cpt_update = 0; |
---|
495 | m_cpt_update_mult = 0; |
---|
496 | m_cpt_inval = 0; |
---|
497 | m_cpt_inval_mult = 0; |
---|
498 | m_cpt_cleanup = 0; |
---|
499 | m_cpt_ll = 0; |
---|
500 | m_cpt_sc = 0; |
---|
501 | |
---|
502 | return; |
---|
503 | } |
---|
504 | |
---|
505 | bool cmd_read_fifo_put = false; |
---|
506 | bool cmd_read_fifo_get = false; |
---|
507 | |
---|
508 | bool cmd_write_fifo_put = false; |
---|
509 | bool cmd_write_fifo_get = false; |
---|
510 | |
---|
511 | bool cmd_llsc_fifo_put = false; |
---|
512 | bool cmd_llsc_fifo_get = false; |
---|
513 | |
---|
514 | bool cmd_cleanup_fifo_put = false; |
---|
515 | bool cmd_cleanup_fifo_get = false; |
---|
516 | |
---|
517 | #if DEBUG_VCI_MEM_CACHE |
---|
518 | std::cout << "---------------------------------------------" << std::dec << std::endl; |
---|
519 | std::cout << "MEM_CACHE " << m_srcid_ini << " ; Time = " << m_cpt_cycles << std::endl |
---|
520 | << " - TGT_CMD FSM = " << tgt_cmd_fsm_str[r_tgt_cmd_fsm] << std::endl |
---|
521 | << " - TGT_RSP FSM = " << tgt_rsp_fsm_str[r_tgt_rsp_fsm] << std::endl |
---|
522 | << " - INIT_CMD FSM = " << init_cmd_fsm_str[r_init_cmd_fsm] << std::endl |
---|
523 | << " - INIT_RSP FSM = " << init_rsp_fsm_str[r_init_rsp_fsm] << std::endl |
---|
524 | << " - READ FSM = " << read_fsm_str[r_read_fsm] << std::endl |
---|
525 | << " - WRITE FSM = " << write_fsm_str[r_write_fsm] << std::endl |
---|
526 | << " - LLSC FSM = " << llsc_fsm_str[r_llsc_fsm] << std::endl |
---|
527 | << " - CLEANUP FSM = " << cleanup_fsm_str[r_cleanup_fsm] << std::endl |
---|
528 | << " - XRAM_CMD FSM = " << xram_cmd_fsm_str[r_xram_cmd_fsm] << std::endl |
---|
529 | << " - IXR_RSP FSM = " << ixr_rsp_fsm_str[r_ixr_rsp_fsm] << std::endl |
---|
530 | << " - XRAM_RSP FSM = " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl |
---|
531 | << " - ALLOC_DIR FSM = " << alloc_dir_fsm_str[r_alloc_dir_fsm] << std::endl |
---|
532 | << " - ALLOC_TRT FSM = " << alloc_trt_fsm_str[r_alloc_trt_fsm] << std::endl |
---|
533 | << " - ALLOC_UPT FSM = " << alloc_upt_fsm_str[r_alloc_upt_fsm] << std::endl; |
---|
534 | #endif |
---|
535 | |
---|
536 | |
---|
537 | //////////////////////////////////////////////////////////////////////////////////// |
---|
538 | // TGT_CMD FSM |
---|
539 | //////////////////////////////////////////////////////////////////////////////////// |
---|
540 | // The TGT_CMD_FSM controls the incoming VCI command pakets from the processors |
---|
541 | // |
---|
542 | // There is 4 types of packets for the m_mem_segment : |
---|
543 | // - READ : a READ request has a length of 1 VCI cell. It can be a single word |
---|
544 | // or an entire cache line, depending on the PLEN value. |
---|
545 | // - WRITE : a WRITE request has a maximum length of 16 cells, and can only |
---|
546 | // concern words in a same line. |
---|
547 | // - LL : The LL request has a length of 1 cell. |
---|
548 | // - SC : The SC request has a length of 1 cell. |
---|
549 | // The WDATA field contains the data to write. |
---|
550 | // |
---|
551 | // There is one type of packet for the m_reg_segment : |
---|
552 | // - CLEANUP : it is a one cell VCI packet. The VCI CMD field is WRITE, and the |
---|
553 | // WDATA field contains the cache line number (z & y). |
---|
554 | //////////////////////////////////////////////////////////////////////////////////// |
---|
555 | |
---|
556 | switch ( r_tgt_cmd_fsm.read() ) { |
---|
557 | |
---|
558 | ////////////////// |
---|
559 | case TGT_CMD_IDLE: |
---|
560 | { |
---|
561 | if ( p_vci_tgt.cmdval ) { |
---|
562 | assert( (p_vci_tgt.srcid.read() < m_initiators) |
---|
563 | && "error in VCI_MEM_CACHE : The received SRCID is larger than 31"); |
---|
564 | |
---|
565 | bool reached = false; |
---|
566 | for ( size_t index = 0 ; index < nseg && !reached ; index++) |
---|
567 | { |
---|
568 | if ( m_seg[index]->contains(p_vci_tgt.address.read()) ) { |
---|
569 | reached = true; |
---|
570 | r_index = index; |
---|
571 | } |
---|
572 | } |
---|
573 | |
---|
574 | if ( !reached ) |
---|
575 | { |
---|
576 | std::cout << "Out of segment access in VCI_MEM_CACHE" << std::endl; |
---|
577 | std::cout << "Faulty address = " << p_vci_tgt.address.read() << std::endl; |
---|
578 | std::cout << "Faulty initiator = " << p_vci_tgt.srcid.read() << std::endl; |
---|
579 | exit(0); |
---|
580 | } |
---|
581 | else if ( p_vci_tgt.cmd.read() == vci_param::CMD_READ ) |
---|
582 | { |
---|
583 | r_tgt_cmd_fsm = TGT_CMD_READ; |
---|
584 | } |
---|
585 | else if (( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) && ( p_vci_tgt.trdid.read() == 0x0 ) ) |
---|
586 | { |
---|
587 | r_tgt_cmd_fsm = TGT_CMD_WRITE; |
---|
588 | } |
---|
589 | else if ((p_vci_tgt.cmd.read() == vci_param::CMD_LOCKED_READ) || |
---|
590 | (p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND) ) |
---|
591 | { |
---|
592 | r_tgt_cmd_fsm = TGT_CMD_ATOMIC; |
---|
593 | } |
---|
594 | else if (( p_vci_tgt.cmd.read() == vci_param::CMD_WRITE ) && ( p_vci_tgt.trdid.read() == 0x1 )) |
---|
595 | { |
---|
596 | r_tgt_cmd_fsm = TGT_CMD_CLEANUP; |
---|
597 | } |
---|
598 | } |
---|
599 | break; |
---|
600 | } |
---|
601 | ////////////////// |
---|
602 | case TGT_CMD_READ: |
---|
603 | { |
---|
604 | assert(((p_vci_tgt.plen.read() == 4) || (p_vci_tgt.plen.read() == m_words*4)) |
---|
605 | && "All read request to the MemCache must have PLEN = 4 or PLEN = 4*nwords"); |
---|
606 | |
---|
607 | if ( p_vci_tgt.cmdval && m_cmd_read_addr_fifo.wok() ) { |
---|
608 | cmd_read_fifo_put = true; |
---|
609 | if ( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; |
---|
610 | else r_tgt_cmd_fsm = TGT_CMD_READ_EOP; |
---|
611 | } |
---|
612 | break; |
---|
613 | } |
---|
614 | ////////////////////// |
---|
615 | case TGT_CMD_READ_EOP: |
---|
616 | { |
---|
617 | if ( p_vci_tgt.cmdval && p_vci_tgt.eop ){ |
---|
618 | r_tgt_cmd_fsm = TGT_CMD_IDLE; |
---|
619 | } |
---|
620 | break; |
---|
621 | } |
---|
622 | /////////////////// |
---|
623 | case TGT_CMD_WRITE: |
---|
624 | { |
---|
625 | if ( p_vci_tgt.cmdval && m_cmd_write_addr_fifo.wok() ) { |
---|
626 | cmd_write_fifo_put = true; |
---|
627 | if( p_vci_tgt.eop ) r_tgt_cmd_fsm = TGT_CMD_IDLE; |
---|
628 | } |
---|
629 | break; |
---|
630 | } |
---|
631 | //////////////////// |
---|
632 | case TGT_CMD_ATOMIC: |
---|
633 | { |
---|
634 | assert(p_vci_tgt.eop && "Memory Cache Error: LL or SC command with length > 1 "); |
---|
635 | |
---|
636 | if ( p_vci_tgt.cmdval && m_cmd_llsc_addr_fifo.wok() ) { |
---|
637 | cmd_llsc_fifo_put = true; |
---|
638 | r_tgt_cmd_fsm = TGT_CMD_IDLE; |
---|
639 | } |
---|
640 | break; |
---|
641 | } |
---|
642 | ///////////////////// |
---|
643 | case TGT_CMD_CLEANUP: |
---|
644 | { |
---|
645 | assert(p_vci_tgt.eop && "Memory Cache Error: CLEANUP request with length > 1 "); |
---|
646 | |
---|
647 | if ( p_vci_tgt.cmdval && m_cmd_cleanup_nline_fifo.wok() ) { |
---|
648 | cmd_cleanup_fifo_put = true; |
---|
649 | r_tgt_cmd_fsm = TGT_CMD_IDLE; |
---|
650 | } |
---|
651 | break; |
---|
652 | } |
---|
653 | } // end switch tgt_cmd_fsm |
---|
654 | |
---|
655 | ///////////////////////////////////////////////////////////////////////// |
---|
656 | // INIT_RSP FSM |
---|
657 | ///////////////////////////////////////////////////////////////////////// |
---|
658 | // This FSM controls the response to the update or invalidate requests |
---|
659 | // sent by the memory cache to the L1 caches : |
---|
660 | // |
---|
661 | // - update request initiated by the WRITE FSM. |
---|
662 | // The FSM decrements the proper entry in the Update/Inval Table. |
---|
663 | // It sends a request to the TGT_RSP FSM to complete the pending |
---|
664 | // write transaction (acknowledge response to the writer processor), |
---|
665 | // and clear the UPT entry when all responses have been received. |
---|
666 | // - invalidate request initiated by the XRAM_RSP FSM. |
---|
667 | // The FSM decrements the proper entry in the Update/Inval_Table, |
---|
668 | // and clear the entry when all responses have been received. |
---|
669 | // |
---|
670 | // All those response packets are one word, compact |
---|
671 | // packets complying with the VCI advanced format. |
---|
672 | // The index in the Table is defined in the RTRDID field, and |
---|
673 | // the Transaction type is defined in the Update/Inval Table. |
---|
674 | ///////////////////////////////////////////////////////////////////// |
---|
675 | |
---|
676 | switch ( r_init_rsp_fsm.read() ) { |
---|
677 | |
---|
678 | /////////////////// |
---|
679 | case INIT_RSP_IDLE: |
---|
680 | { |
---|
681 | if ( p_vci_ini.rspval ) { |
---|
682 | assert ( ( p_vci_ini.rtrdid.read() < m_update_tab.size() ) |
---|
683 | && "UPT index too large in VCI response paquet received by memory cache" ); |
---|
684 | assert ( p_vci_ini.reop |
---|
685 | && "All response packets to update/invalidate requests must be one cell" ); |
---|
686 | r_init_rsp_upt_index = p_vci_ini.rtrdid.read(); |
---|
687 | r_init_rsp_fsm = INIT_RSP_UPT_LOCK; |
---|
688 | } |
---|
689 | break; |
---|
690 | } |
---|
691 | /////////////////////// |
---|
692 | case INIT_RSP_UPT_LOCK: // decrement the number of expected responses |
---|
693 | { |
---|
694 | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { |
---|
695 | size_t count = 0; |
---|
696 | bool valid = m_update_tab.decrement(r_init_rsp_upt_index.read(), count); |
---|
697 | assert ( valid |
---|
698 | && "Invalid UPT entry in VCI response paquet received by memory cache" ); |
---|
699 | if ( count == 0 ) r_init_rsp_fsm = INIT_RSP_UPT_CLEAR; |
---|
700 | else r_init_rsp_fsm = INIT_RSP_IDLE; |
---|
701 | } |
---|
702 | break; |
---|
703 | } |
---|
704 | //////////////////////// |
---|
705 | case INIT_RSP_UPT_CLEAR: // clear the UPT entry |
---|
706 | { |
---|
707 | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_INIT_RSP ) { |
---|
708 | r_init_rsp_srcid = m_update_tab.srcid(r_init_rsp_upt_index.read()); |
---|
709 | r_init_rsp_trdid = m_update_tab.trdid(r_init_rsp_upt_index.read()); |
---|
710 | r_init_rsp_pktid = m_update_tab.pktid(r_init_rsp_upt_index.read()); |
---|
711 | bool update = m_update_tab.is_update(r_init_rsp_upt_index.read()); |
---|
712 | if ( update ) r_init_rsp_fsm = INIT_RSP_END; |
---|
713 | else r_init_rsp_fsm = INIT_RSP_IDLE; |
---|
714 | m_update_tab.clear(r_init_rsp_upt_index.read()); |
---|
715 | } |
---|
716 | break; |
---|
717 | } |
---|
718 | ////////////////// |
---|
719 | case INIT_RSP_END: |
---|
720 | { |
---|
721 | if ( !r_init_rsp_to_tgt_rsp_req ) { |
---|
722 | r_init_rsp_to_tgt_rsp_req = true; |
---|
723 | r_init_rsp_to_tgt_rsp_srcid = r_init_rsp_srcid.read(); |
---|
724 | r_init_rsp_to_tgt_rsp_trdid = r_init_rsp_trdid.read(); |
---|
725 | r_init_rsp_to_tgt_rsp_pktid = r_init_rsp_pktid.read(); |
---|
726 | r_init_rsp_fsm = INIT_RSP_IDLE; |
---|
727 | } |
---|
728 | break; |
---|
729 | } |
---|
730 | } // end switch r_init_rsp_fsm |
---|
731 | |
---|
732 | //////////////////////////////////////////////////////////////////////////////////// |
---|
733 | // READ FSM |
---|
734 | //////////////////////////////////////////////////////////////////////////////////// |
---|
735 | // The READ FSM controls the read requests sent by processors. |
---|
736 | // It takes the lock protecting the cache directory to check the cache line status: |
---|
737 | // - In case of HIT, the fsm copies the data (one line, or one single word) |
---|
738 | // in the r_read_to_tgt_rsp buffer. It waits if this buffer is not empty. |
---|
739 | // The requesting initiator is registered in the cache directory. |
---|
740 | // - In case of MISS, the READ fsm takes the lock protecting the transaction tab. |
---|
741 | // If a read transaction to the XRAM for this line already exists, |
---|
742 | // or if the transaction tab is full, the fsm is stalled. |
---|
743 | // If a transaction entry is free, the READ fsm sends a request to the XRAM. |
---|
744 | //////////////////////////////////////////////////////////////////////////////////// |
---|
745 | |
---|
746 | switch ( r_read_fsm.read() ) { |
---|
747 | |
---|
748 | /////////////// |
---|
749 | case READ_IDLE: |
---|
750 | { |
---|
751 | if (m_cmd_read_addr_fifo.rok()) { |
---|
752 | m_cpt_read++; |
---|
753 | r_read_fsm = READ_DIR_LOCK; |
---|
754 | } |
---|
755 | break; |
---|
756 | } |
---|
757 | /////////////////// |
---|
758 | case READ_DIR_LOCK: // check directory for hit / miss |
---|
759 | { |
---|
760 | if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { |
---|
761 | size_t way = 0; |
---|
762 | DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); |
---|
763 | |
---|
764 | r_read_dirty = entry.dirty; |
---|
765 | r_read_tag = entry.tag; |
---|
766 | r_read_lock = entry.lock; |
---|
767 | r_read_way = way; |
---|
768 | r_read_word = m_cmd_read_word_fifo.read(); |
---|
769 | r_read_copies = entry.copies | (0x1 << m_cmd_read_srcid_fifo.read()); |
---|
770 | |
---|
771 | // In case of hit, the read access must be registered in the copies bit-vector |
---|
772 | // for both a cache line read & a single word read (TLB coherence) |
---|
773 | |
---|
774 | if( entry.valid ) { |
---|
775 | r_read_fsm = READ_DIR_HIT; |
---|
776 | } else { |
---|
777 | r_read_fsm = READ_TRT_LOCK; |
---|
778 | m_cpt_read_miss++; |
---|
779 | } |
---|
780 | } |
---|
781 | break; |
---|
782 | } |
---|
783 | ////////////////// |
---|
784 | case READ_DIR_HIT: // read hit : update the memory cache |
---|
785 | { |
---|
786 | if( r_alloc_dir_fsm.read() == ALLOC_DIR_READ ) { |
---|
787 | |
---|
788 | // read data in the cache |
---|
789 | size_t set = m_y[m_cmd_read_addr_fifo.read()]; |
---|
790 | size_t way = r_read_way.read(); |
---|
791 | for ( size_t i=0 ; i<m_words ; i++ ) { |
---|
792 | r_read_data[i] = m_cache_data[way][set][i]; |
---|
793 | } |
---|
794 | |
---|
795 | // update the cache directory (for the copies) |
---|
796 | DirectoryEntry entry; |
---|
797 | entry.valid = true; |
---|
798 | entry.dirty = r_read_dirty.read(); |
---|
799 | entry.tag = r_read_tag.read(); |
---|
800 | entry.lock = r_read_lock.read(); |
---|
801 | entry.copies = r_read_copies.read(); |
---|
802 | m_cache_directory.write(set, way, entry); |
---|
803 | r_read_fsm = READ_RSP; |
---|
804 | } |
---|
805 | break; |
---|
806 | } |
---|
807 | ////////////// |
---|
808 | case READ_RSP: // request the TGT_RSP FSM to return data |
---|
809 | { |
---|
810 | if( !r_read_to_tgt_rsp_req ) { |
---|
811 | for ( size_t i=0 ; i<m_words ; i++ ) { |
---|
812 | r_read_to_tgt_rsp_data[i] = r_read_data[i]; |
---|
813 | if ( r_read_word ) { // single word |
---|
814 | r_read_to_tgt_rsp_val[i] = (i == m_x[m_cmd_read_addr_fifo.read()]); |
---|
815 | } else { // cache line |
---|
816 | r_read_to_tgt_rsp_val[i] = true; |
---|
817 | } |
---|
818 | } |
---|
819 | cmd_read_fifo_get = true; |
---|
820 | r_read_to_tgt_rsp_req = true; |
---|
821 | r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); |
---|
822 | r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); |
---|
823 | r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); |
---|
824 | r_read_fsm = READ_IDLE; |
---|
825 | } |
---|
826 | break; |
---|
827 | } |
---|
828 | /////////////////// |
---|
829 | case READ_TRT_LOCK: // read miss : check the Transaction Table |
---|
830 | { |
---|
831 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { |
---|
832 | size_t index = 0; |
---|
833 | bool hit = m_transaction_tab.hit_read(m_nline[m_cmd_read_addr_fifo.read()], index); |
---|
834 | bool wok = !m_transaction_tab.full(index); |
---|
835 | if( hit || !wok ) { // missing line already requested or no space |
---|
836 | r_read_fsm = READ_IDLE; |
---|
837 | } else { // missing line is requested to the XRAM |
---|
838 | r_read_trt_index = index; |
---|
839 | r_read_fsm = READ_TRT_SET; |
---|
840 | } |
---|
841 | } |
---|
842 | break; |
---|
843 | } |
---|
844 | ////////////////// |
---|
845 | case READ_TRT_SET: |
---|
846 | { |
---|
847 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_READ ) { |
---|
848 | m_transaction_tab.set(r_read_trt_index.read(), |
---|
849 | true, |
---|
850 | m_nline[m_cmd_read_addr_fifo.read()], |
---|
851 | m_cmd_read_srcid_fifo.read(), |
---|
852 | m_cmd_read_trdid_fifo.read(), |
---|
853 | m_cmd_read_pktid_fifo.read(), |
---|
854 | true, |
---|
855 | m_cmd_read_word_fifo.read(), |
---|
856 | m_x[m_cmd_read_addr_fifo.read()], |
---|
857 | std::vector<be_t>(m_words,0), |
---|
858 | std::vector<data_t>(m_words,0)); |
---|
859 | r_read_fsm = READ_XRAM_REQ; |
---|
860 | } |
---|
861 | break; |
---|
862 | } |
---|
863 | ///////////////////// |
---|
864 | case READ_XRAM_REQ: |
---|
865 | { |
---|
866 | if( !r_read_to_xram_cmd_req ) { |
---|
867 | cmd_read_fifo_get = true; |
---|
868 | r_read_to_xram_cmd_req = true; |
---|
869 | r_read_to_xram_cmd_nline = m_nline[m_cmd_read_addr_fifo.read()]; |
---|
870 | r_read_to_xram_cmd_trdid = r_read_trt_index.read(); |
---|
871 | r_read_fsm = READ_IDLE; |
---|
872 | } |
---|
873 | break; |
---|
874 | } |
---|
875 | } // end switch read_fsm |
---|
876 | |
---|
877 | /////////////////////////////////////////////////////////////////////////////////// |
---|
878 | // WRITE FSM |
---|
879 | /////////////////////////////////////////////////////////////////////////////////// |
---|
880 | // The WRITE FSM handles the write bursts sent by the processors. |
---|
881 | // All addresses in a burst must be in the same cache line. |
---|
882 | // A complete write burst is consumed in the FIFO & copied to a local buffer. |
---|
883 | // Then the FSM takes the lock protecting the cache directory, to check |
---|
884 | // if the line is in the cache. |
---|
885 | // |
---|
886 | // - In case of HIT, the cache is updated. |
---|
887 | // If there is no other copy, an acknowledge response is immediately |
---|
888 | // returned to the writing processor. |
---|
889 | // If the data is cached by other processors, the FSM takes the lock |
---|
890 | // protecting the Update Table (UPT) to register this update transaction. |
---|
891 | // If the UPT is full, it releases the lock and waits. Then, it sends |
---|
892 | // a multi-update request to all owners of the line (but the writer), |
---|
893 | // through the INIT_CMD FSM. In case of multi-update transaction, the WRITE FSM |
---|
894 | // does not respond to the writing processor, as this response will be sent by |
---|
895 | // the INIT_RSP FSM when all update responses have been received. |
---|
896 | // |
---|
897 | // - In case of MISS, the WRITE FSM takes the lock protecting the transaction |
---|
898 | // table (TRT). If a read transaction to the XRAM for this line already exists, |
---|
899 | // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, |
---|
900 | // the WRITE FSM registers a new transaction in TRT, and sends a read line request |
---|
901 | // to the XRAM. If the TRT is full, it releases the lock, and waits. |
---|
902 | // Finally, the WRITE FSM returns an acknowledge response to the writing processor. |
---|
903 | ///////////////////////////////////////////////////////////////////////////////////// |
---|
904 | |
---|
905 | switch ( r_write_fsm.read() ) { |
---|
906 | |
---|
907 | //////////////// |
---|
908 | case WRITE_IDLE: // copy first word of a write burst in local buffer |
---|
909 | { |
---|
910 | if ( m_cmd_write_addr_fifo.rok()) { |
---|
911 | m_cpt_write++; |
---|
912 | m_cpt_write_cells++; |
---|
913 | // consume a word in the FIFO & write it in the local buffer |
---|
914 | cmd_write_fifo_get = true; |
---|
915 | size_t index = m_x[m_cmd_write_addr_fifo.read()]; |
---|
916 | r_write_address = m_cmd_write_addr_fifo.read(); |
---|
917 | r_write_word_index = index; |
---|
918 | r_write_word_count = 1; |
---|
919 | r_write_data[index] = m_cmd_write_data_fifo.read(); |
---|
920 | r_write_srcid = m_cmd_write_srcid_fifo.read(); |
---|
921 | r_write_trdid = m_cmd_write_trdid_fifo.read(); |
---|
922 | r_write_pktid = m_cmd_write_pktid_fifo.read(); |
---|
923 | |
---|
924 | // the be field must be set for all words |
---|
925 | for ( size_t i=0 ; i<m_words ; i++ ) { |
---|
926 | if ( i == index ) r_write_be[i] = m_cmd_write_be_fifo.read(); |
---|
927 | else r_write_be[i] = 0x0; |
---|
928 | } |
---|
929 | if( !((m_cmd_write_be_fifo.read()==0x0)||(m_cmd_write_be_fifo.read()==0xF)) ) |
---|
930 | r_write_byte=true; |
---|
931 | else r_write_byte=false; |
---|
932 | |
---|
933 | if( m_cmd_write_eop_fifo.read() ) |
---|
934 | { |
---|
935 | r_write_fsm = WRITE_DIR_LOCK; |
---|
936 | } |
---|
937 | else |
---|
938 | { |
---|
939 | r_write_fsm = WRITE_NEXT; |
---|
940 | } |
---|
941 | } |
---|
942 | break; |
---|
943 | } |
---|
944 | //////////////// |
---|
945 | case WRITE_NEXT: // copy next word of a write burst in local buffer |
---|
946 | { |
---|
947 | if ( m_cmd_write_addr_fifo.rok() ) { |
---|
948 | m_cpt_write_cells++; |
---|
949 | |
---|
950 | // check that the next word is in the same cache line |
---|
951 | assert( (m_nline[r_write_address.read()] == m_nline[m_cmd_write_addr_fifo.read()]) |
---|
952 | && "write error in vci_mem_cache : write burst over a line" ); |
---|
953 | // consume a word in the FIFO & write it in the local buffer |
---|
954 | cmd_write_fifo_get=true; |
---|
955 | size_t index = r_write_word_index.read() + r_write_word_count.read(); |
---|
956 | r_write_be[index] = m_cmd_write_be_fifo.read(); |
---|
957 | r_write_data[index] = m_cmd_write_data_fifo.read(); |
---|
958 | r_write_word_count = r_write_word_count.read() + 1; |
---|
959 | if( !((m_cmd_write_be_fifo.read()==0x0)||(m_cmd_write_be_fifo.read()==0xF)) ) |
---|
960 | r_write_byte=true; |
---|
961 | if ( m_cmd_write_eop_fifo.read() ) r_write_fsm = WRITE_DIR_LOCK; |
---|
962 | } |
---|
963 | break; |
---|
964 | } |
---|
965 | //////////////////// |
---|
966 | case WRITE_DIR_LOCK: // access directory to check hit/miss |
---|
967 | { |
---|
968 | if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) { |
---|
969 | size_t way = 0; |
---|
970 | DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); |
---|
971 | |
---|
972 | // copy directory entry in local buffers in case of hit |
---|
973 | if ( entry.valid ) { |
---|
974 | r_write_lock = entry.lock; |
---|
975 | r_write_tag = entry.tag; |
---|
976 | r_write_copies = entry.copies; |
---|
977 | r_write_way = way; |
---|
978 | if(r_write_byte.read()) |
---|
979 | r_write_fsm = WRITE_DIR_HIT_READ; |
---|
980 | else r_write_fsm = WRITE_DIR_HIT; |
---|
981 | } else { |
---|
982 | r_write_fsm = WRITE_TRT_LOCK; |
---|
983 | m_cpt_write_miss++; |
---|
984 | } |
---|
985 | } |
---|
986 | break; |
---|
987 | } |
---|
988 | /////////////////// |
---|
989 | case WRITE_DIR_HIT_READ: // read the cache and complete the buffer (data, when be!=0xF) |
---|
990 | { |
---|
991 | // update local buffer |
---|
992 | size_t set = m_y[r_write_address.read()]; |
---|
993 | size_t way = r_write_way.read(); |
---|
994 | for(size_t i=0 ; i<m_words ; i++) { |
---|
995 | data_t mask = 0; |
---|
996 | if (r_write_be[i].read() & 0x1) mask = mask | 0x000000FF; |
---|
997 | if (r_write_be[i].read() & 0x2) mask = mask | 0x0000FF00; |
---|
998 | if (r_write_be[i].read() & 0x4) mask = mask | 0x00FF0000; |
---|
999 | if (r_write_be[i].read() & 0x8) mask = mask | 0xFF000000; |
---|
1000 | if(r_write_be[i].read()) { // complete only if mask is not null (for energy consumption) |
---|
1001 | r_write_data[i] = (r_write_data[i].read() & mask) | |
---|
1002 | (m_cache_data[way][set][i] & ~mask); |
---|
1003 | r_write_be[i]=0xF; |
---|
1004 | } |
---|
1005 | } // end for |
---|
1006 | |
---|
1007 | r_write_fsm = WRITE_DIR_HIT; |
---|
1008 | break; |
---|
1009 | } |
---|
1010 | /////////////////// |
---|
1011 | case WRITE_DIR_HIT: // update the cache (data & dirty bit) |
---|
1012 | { |
---|
1013 | // update directory with Dirty bit |
---|
1014 | DirectoryEntry entry; |
---|
1015 | entry.valid = true; |
---|
1016 | entry.dirty = true; |
---|
1017 | entry.tag = r_write_tag.read(); |
---|
1018 | entry.lock = r_write_lock.read(); |
---|
1019 | entry.copies = r_write_copies.read(); |
---|
1020 | size_t set = m_y[r_write_address.read()]; |
---|
1021 | size_t way = r_write_way.read(); |
---|
1022 | m_cache_directory.write(set, way, entry); |
---|
1023 | |
---|
1024 | // write data in cache |
---|
1025 | for(size_t i=0 ; i<m_words ; i++) { |
---|
1026 | if ( r_write_be[i].read() ) { |
---|
1027 | m_cache_data[way][set][i] = r_write_data[i].read(); |
---|
1028 | } |
---|
1029 | } // end for |
---|
1030 | |
---|
1031 | // compute the actual number of copies & the modified bit vector |
---|
1032 | size_t n = 0; |
---|
1033 | copy_t mask = 0x1; |
---|
1034 | copy_t copies = r_write_copies.read(); |
---|
1035 | for ( size_t i=0 ; i<32 ; i++) { |
---|
1036 | if ( i != r_write_srcid.read() ) { |
---|
1037 | if ( copies & mask ) n++; |
---|
1038 | } else { |
---|
1039 | copies = copies & ~mask; |
---|
1040 | } |
---|
1041 | mask = (mask << 1); |
---|
1042 | } |
---|
1043 | r_write_nb_copies = n; |
---|
1044 | r_write_copies = copies; |
---|
1045 | |
---|
1046 | if ( n == 0 ) r_write_fsm = WRITE_RSP; |
---|
1047 | else r_write_fsm = WRITE_UPT_LOCK; |
---|
1048 | break; |
---|
1049 | } |
---|
1050 | ///////////////////// |
---|
1051 | case WRITE_UPT_LOCK: // Try to register the request in Update Table |
---|
1052 | { |
---|
1053 | if ( r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE ) { |
---|
1054 | bool wok=false; |
---|
1055 | size_t index=0; |
---|
1056 | size_t srcid=r_write_srcid.read(); |
---|
1057 | size_t trdid=r_write_trdid.read(); |
---|
1058 | size_t pktid=r_write_pktid.read(); |
---|
1059 | size_t nb_copies=r_write_nb_copies.read(); |
---|
1060 | wok =m_update_tab.set(true, // it's an update transaction |
---|
1061 | srcid, |
---|
1062 | trdid, |
---|
1063 | pktid, |
---|
1064 | nb_copies, |
---|
1065 | index); |
---|
1066 | r_write_upt_index = index; |
---|
1067 | // releases the lock protecting Update Table if no entry... |
---|
1068 | if ( wok ) r_write_fsm = WRITE_UPDATE; |
---|
1069 | else r_write_fsm = WRITE_WAIT_UPT; |
---|
1070 | } |
---|
1071 | break; |
---|
1072 | } |
---|
1073 | //////////////////// |
---|
1074 | case WRITE_WAIT_UPT: // release the lock protecting UPT |
---|
1075 | { |
---|
1076 | r_write_fsm = WRITE_UPT_LOCK; |
---|
1077 | break; |
---|
1078 | } |
---|
1079 | ////////////////// |
---|
1080 | case WRITE_UPDATE: // send a multi-update request to INIT_CMD fsm |
---|
1081 | { |
---|
1082 | if ( !r_write_to_init_cmd_req ) { |
---|
1083 | r_write_to_init_cmd_req = true; |
---|
1084 | r_write_to_init_cmd_trdid = r_write_upt_index.read(); |
---|
1085 | r_write_to_init_cmd_nline = m_nline[r_write_address.read()]; |
---|
1086 | r_write_to_init_cmd_index = r_write_word_index.read(); |
---|
1087 | r_write_to_init_cmd_count = r_write_word_count.read(); |
---|
1088 | r_write_to_init_cmd_copies = r_write_copies; |
---|
1089 | |
---|
1090 | for(size_t i=0; i<m_words ; i++){ |
---|
1091 | assert( ((r_write_be[i].read()==0xF)||(r_write_be[i].read()==0x0)) && |
---|
1092 | "write error in vci_mem_cache : invalid BE"); |
---|
1093 | if(r_write_be[i].read()) r_write_to_init_cmd_we[i]=true; |
---|
1094 | else r_write_to_init_cmd_we[i]=false; |
---|
1095 | } |
---|
1096 | |
---|
1097 | size_t min = r_write_word_index.read(); |
---|
1098 | size_t max = r_write_word_index.read() + r_write_word_count.read(); |
---|
1099 | for (size_t i=min ; i<max ; i++) { |
---|
1100 | r_write_to_init_cmd_data[i] = r_write_data[i]; |
---|
1101 | } |
---|
1102 | r_write_fsm = WRITE_IDLE; |
---|
1103 | } |
---|
1104 | break; |
---|
1105 | } |
---|
1106 | /////////////// |
---|
1107 | case WRITE_RSP: // send a request to TGT_RSP FSM to acknowledge the write |
---|
1108 | { |
---|
1109 | if ( !r_write_to_tgt_rsp_req ) { |
---|
1110 | r_write_to_tgt_rsp_req = true; |
---|
1111 | r_write_to_tgt_rsp_srcid = r_write_srcid; |
---|
1112 | r_write_to_tgt_rsp_trdid = r_write_trdid; |
---|
1113 | r_write_to_tgt_rsp_pktid = r_write_pktid; |
---|
1114 | r_write_fsm = WRITE_IDLE; |
---|
1115 | } |
---|
1116 | break; |
---|
1117 | } |
---|
1118 | //////////////////// |
---|
1119 | case WRITE_TRT_LOCK: // Miss : check Transaction Table |
---|
1120 | { |
---|
1121 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { |
---|
1122 | size_t hit_index = 0; |
---|
1123 | size_t wok_index = 0; |
---|
1124 | bool hit = m_transaction_tab.hit_read(m_nline[r_write_address.read()],hit_index); |
---|
1125 | bool wok = !m_transaction_tab.full(wok_index); |
---|
1126 | if ( hit ) { // register the modified data in TRT |
---|
1127 | r_write_trt_index = hit_index; |
---|
1128 | r_write_fsm = WRITE_TRT_DATA; |
---|
1129 | } else if ( wok ) { // set a new entry in TRT |
---|
1130 | r_write_trt_index = wok_index; |
---|
1131 | r_write_fsm = WRITE_TRT_SET; |
---|
1132 | } else { // wait an empty entry in TRT |
---|
1133 | r_write_fsm = WRITE_WAIT_TRT; |
---|
1134 | } |
---|
1135 | } |
---|
1136 | break; |
---|
1137 | } |
---|
1138 | //////////////////// |
---|
1139 | case WRITE_WAIT_TRT: // release the lock protecting TRT |
---|
1140 | { |
---|
1141 | r_write_fsm = WRITE_DIR_LOCK; |
---|
1142 | break; |
---|
1143 | } |
---|
1144 | /////////////////// |
---|
1145 | case WRITE_TRT_SET: // register a new transaction in TRT (Write Buffer) |
---|
1146 | { |
---|
1147 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { |
---|
1148 | std::vector<be_t> be_vector; |
---|
1149 | std::vector<data_t> data_vector; |
---|
1150 | be_vector.clear(); |
---|
1151 | data_vector.clear(); |
---|
1152 | for ( size_t i=0; i<m_words; i++ ) { |
---|
1153 | be_vector.push_back(r_write_be[i]); |
---|
1154 | data_vector.push_back(r_write_data[i]); |
---|
1155 | } |
---|
1156 | m_transaction_tab.set(r_write_trt_index.read(), |
---|
1157 | true, // read request to XRAM |
---|
1158 | m_nline[r_write_address.read()], |
---|
1159 | r_write_srcid.read(), |
---|
1160 | r_write_trdid.read(), |
---|
1161 | r_write_pktid.read(), |
---|
1162 | false, // not a processor read |
---|
1163 | false, // not a single word |
---|
1164 | 0, // word index |
---|
1165 | be_vector, |
---|
1166 | data_vector); |
---|
1167 | r_write_fsm = WRITE_XRAM_REQ; |
---|
1168 | } |
---|
1169 | break; |
---|
1170 | } |
---|
1171 | /////////////////// |
---|
1172 | case WRITE_TRT_DATA: // update an entry in TRT (Write Buffer) |
---|
1173 | { |
---|
1174 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE ) { |
---|
1175 | std::vector<be_t> be_vector; |
---|
1176 | std::vector<data_t> data_vector; |
---|
1177 | be_vector.clear(); |
---|
1178 | data_vector.clear(); |
---|
1179 | for ( size_t i=0; i<m_words; i++ ) { |
---|
1180 | be_vector.push_back(r_write_be[i]); |
---|
1181 | data_vector.push_back(r_write_data[i]); |
---|
1182 | } |
---|
1183 | m_transaction_tab.write_data_mask(r_write_trt_index.read(), |
---|
1184 | be_vector, |
---|
1185 | data_vector); |
---|
1186 | r_write_fsm = WRITE_RSP; |
---|
1187 | } |
---|
1188 | break; |
---|
1189 | } |
---|
1190 | //////////////////// |
---|
1191 | case WRITE_XRAM_REQ: // send a request to XRAM_CMD FSM |
---|
1192 | { |
---|
1193 | if ( !r_write_to_xram_cmd_req ) { |
---|
1194 | r_write_to_xram_cmd_req = true; |
---|
1195 | r_write_to_xram_cmd_nline = m_nline[r_write_address.read()]; |
---|
1196 | r_write_to_xram_cmd_trdid = r_write_trt_index.read(); |
---|
1197 | r_write_fsm = WRITE_RSP; |
---|
1198 | } |
---|
1199 | break; |
---|
1200 | } |
---|
1201 | } // end switch r_write_fsm |
---|
1202 | |
---|
1203 | /////////////////////////////////////////////////////////////////////// |
---|
1204 | // XRAM_CMD FSM |
---|
1205 | /////////////////////////////////////////////////////////////////////// |
---|
1206 | // The XRAM_CMD fsm controls the command packets to the XRAM : |
---|
1207 | // - It sends a single cell VCI write in case of MISS request |
---|
1208 | // posted by the READ, WRITE or LLSC FSMs : the WDATA field |
---|
1209 | // contains the cache line index and the TRDID field contains |
---|
1210 | // the Transaction Tab index. |
---|
1211 | // The VCI response is a multi-cell packet : the N cells contain |
---|
1212 | // the N data words. |
---|
1213 | // - It sends a multi-cell VCI write when the XRAM_RSP FSM request |
---|
1214 | // to save a dirty line to the XRAM : the first cell contains the |
---|
1215 | // cache line index, the following cells contain the data. |
---|
1216 | // The VCI response is a single cell packet. |
---|
1217 | // This FSM handles requests from the READ, WRITE, LLSC & XRAM_RSP FSMs |
---|
1218 | // with a round-robin priority. |
---|
1219 | //////////////////////////////////////////////////////////////////////// |
---|
1220 | |
---|
1221 | switch ( r_xram_cmd_fsm.read() ) { |
---|
1222 | |
---|
1223 | //////////////////////// |
---|
1224 | case XRAM_CMD_READ_IDLE: |
---|
1225 | if ( r_write_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_WRITE_NLINE; |
---|
1226 | else if ( r_llsc_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_LLSC_NLINE; |
---|
1227 | else if ( r_xram_rsp_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_XRAM_DATA; |
---|
1228 | else if ( r_read_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_READ_NLINE; |
---|
1229 | break; |
---|
1230 | //////////////////////// |
---|
1231 | case XRAM_CMD_WRITE_IDLE: |
---|
1232 | if ( r_llsc_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_LLSC_NLINE; |
---|
1233 | else if ( r_xram_rsp_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_XRAM_DATA; |
---|
1234 | else if ( r_read_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_READ_NLINE; |
---|
1235 | else if ( r_write_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_WRITE_NLINE; |
---|
1236 | break; |
---|
1237 | //////////////////////// |
---|
1238 | case XRAM_CMD_LLSC_IDLE: |
---|
1239 | if ( r_xram_rsp_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_XRAM_DATA; |
---|
1240 | else if ( r_read_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_READ_NLINE; |
---|
1241 | else if ( r_write_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_WRITE_NLINE; |
---|
1242 | else if ( r_llsc_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_LLSC_NLINE; |
---|
1243 | break; |
---|
1244 | //////////////////////// |
---|
1245 | case XRAM_CMD_XRAM_IDLE: |
---|
1246 | if ( r_read_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_READ_NLINE; |
---|
1247 | else if ( r_write_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_WRITE_NLINE; |
---|
1248 | else if ( r_llsc_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_LLSC_NLINE; |
---|
1249 | else if ( r_xram_rsp_to_xram_cmd_req ) r_xram_cmd_fsm = XRAM_CMD_XRAM_DATA; |
---|
1250 | break; |
---|
1251 | ///////////////////////// |
---|
1252 | case XRAM_CMD_READ_NLINE: |
---|
1253 | if ( p_vci_ixr.cmdack ) { |
---|
1254 | r_xram_cmd_fsm = XRAM_CMD_READ_IDLE; |
---|
1255 | r_read_to_xram_cmd_req = false; |
---|
1256 | } |
---|
1257 | break; |
---|
1258 | ////////////////////////// |
---|
1259 | case XRAM_CMD_WRITE_NLINE: |
---|
1260 | if ( p_vci_ixr.cmdack ) { |
---|
1261 | r_xram_cmd_fsm = XRAM_CMD_WRITE_IDLE; |
---|
1262 | r_write_to_xram_cmd_req = false; |
---|
1263 | } |
---|
1264 | break; |
---|
1265 | ///////////////////////// |
---|
1266 | case XRAM_CMD_LLSC_NLINE: |
---|
1267 | if ( p_vci_ixr.cmdack ) { |
---|
1268 | r_xram_cmd_fsm = XRAM_CMD_LLSC_IDLE; |
---|
1269 | r_llsc_to_xram_cmd_req = false; |
---|
1270 | } |
---|
1271 | break; |
---|
1272 | //////////////////////// |
---|
1273 | case XRAM_CMD_XRAM_DATA: |
---|
1274 | if ( p_vci_ixr.cmdack ) { |
---|
1275 | if ( r_xram_cmd_cpt.read() == (m_words - 1) ) { |
---|
1276 | r_xram_cmd_cpt = 0; |
---|
1277 | r_xram_cmd_fsm = XRAM_CMD_XRAM_IDLE; |
---|
1278 | r_xram_rsp_to_xram_cmd_req = false; |
---|
1279 | } else { |
---|
1280 | r_xram_cmd_cpt = r_xram_cmd_cpt + 1; |
---|
1281 | } |
---|
1282 | } |
---|
1283 | break; |
---|
1284 | |
---|
1285 | } // end switch r_xram_cmd_fsm |
---|
1286 | |
---|
1287 | //////////////////////////////////////////////////////////////////////////// |
---|
1288 | // IXR_RSP FSM |
---|
1289 | //////////////////////////////////////////////////////////////////////////// |
---|
1290 | // The IXR_RSP FSM receives the response packets from the XRAM, |
---|
1291 | // for both write transactions and read transactions. |
---|
1292 | // |
---|
1293 | // - A response to a write request is a single-cell VCI packet. |
---|
1294 | // The Transaction Tab index is contained in the RTRDID field. |
---|
1295 | // The FSM takes the lock protecting the TRT, and the corresponding |
---|
1296 | // entry is erased. |
---|
1297 | // |
---|
1298 | // - A response to a read request is a multi-cell VCI packet. |
---|
1299 | // The Transaction Tab index is contained in the RTRDID field. |
---|
1300 | // The N cells contain the N words of the cache line in the RDATA field. |
---|
1301 | // The FSM takes the lock protecting the TRT to store the line in the TRT |
---|
1302 | // (taking into account the write requests already stored in the TRT). |
---|
1303 | // When the line is completely written, the corresponding rok signal is set. |
---|
1304 | /////////////////////////////////////////////////////////////////////////////// |
---|
1305 | |
---|
1306 | switch ( r_ixr_rsp_fsm.read() ) { |
---|
1307 | |
---|
1308 | /////////////////// |
---|
1309 | case IXR_RSP_IDLE: // test if it's a read or a write transaction |
---|
1310 | { |
---|
1311 | if ( p_vci_ixr.rspval ) { |
---|
1312 | r_ixr_rsp_cpt = 0; |
---|
1313 | r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); |
---|
1314 | if ( p_vci_ixr.reop ) r_ixr_rsp_fsm = IXR_RSP_ACK; |
---|
1315 | else r_ixr_rsp_fsm = IXR_RSP_TRT_READ; |
---|
1316 | } |
---|
1317 | break; |
---|
1318 | } |
---|
1319 | //////////////////////// |
---|
1320 | case IXR_RSP_ACK: // Acknowledge the vci response |
---|
1321 | r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; |
---|
1322 | break; |
---|
1323 | //////////////////////// |
---|
1324 | case IXR_RSP_TRT_ERASE: // erase the entry in the TRT |
---|
1325 | { |
---|
1326 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP ) { |
---|
1327 | m_transaction_tab.erase(r_ixr_rsp_trt_index.read()); |
---|
1328 | r_ixr_rsp_fsm = IXR_RSP_IDLE; |
---|
1329 | } |
---|
1330 | break; |
---|
1331 | } |
---|
1332 | /////////////////////// |
---|
1333 | case IXR_RSP_TRT_READ: // write data in the TRT |
---|
1334 | { |
---|
1335 | if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && p_vci_ixr.rspval ) { |
---|
1336 | bool eop = p_vci_ixr.reop.read(); |
---|
1337 | data_t data = p_vci_ixr.rdata.read(); |
---|
1338 | size_t index = r_ixr_rsp_trt_index.read(); |
---|
1339 | assert( eop == (r_ixr_rsp_cpt.read() == (m_words-1)) |
---|
1340 | && "Error in VCI_MEM_CACHE : invalid length for a response from XRAM"); |
---|
1341 | m_transaction_tab.write_rsp(index, r_ixr_rsp_cpt.read(), data); |
---|
1342 | r_ixr_rsp_cpt = r_ixr_rsp_cpt.read() + 1; |
---|
1343 | if ( eop ) { |
---|
1344 | r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true; |
---|
1345 | r_ixr_rsp_fsm = IXR_RSP_IDLE; |
---|
1346 | } |
---|
1347 | } |
---|
1348 | break; |
---|
1349 | } |
---|
1350 | } // end swich r_ixr_rsp_fsm |
---|
1351 | |
---|
1352 | |
---|
1353 | //////////////////////////////////////////////////////////////////////////// |
---|
1354 | // XRAM_RSP FSM |
---|
1355 | //////////////////////////////////////////////////////////////////////////// |
---|
1356 | // The XRAM_RSP FSM handles the response packets from the XRAM, |
---|
1357 | // for read transactions. |
---|
1358 | // |
---|
1359 | // When a response is available, the corresponding TRT entry |
---|
1360 | // is copied in a local buffer to be written in the cache. |
---|
1361 | // Then, the FSM releases the lock protecting the TRT, and takes the lock |
---|
1362 | // protecting the cache directory. |
---|
1363 | // It selects a cache slot and writes the line in the cache. |
---|
1364 | // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP |
---|
1365 | // FSM to return the cache line to the registered processor. |
---|
1366 | // If there is no empty slot, a victim line is evicted, and |
---|
1367 | // invalidate requests are sent to the L1 caches containing copies. |
---|
1368 | // If this line is dirty, the XRAM_RSP FSM send a request to the XRAM_CMD |
---|
1369 | // FSM to save the victim line to the XRAM, and register the write transaction |
---|
1370 | // in the TRT (using the entry previously used by the read transaction). |
---|
1371 | /////////////////////////////////////////////////////////////////////////////// |
---|
1372 | |
---|
1373 | switch ( r_xram_rsp_fsm.read() ) { |
---|
1374 | |
---|
1375 | /////////////////// |
---|
1376 | case XRAM_RSP_IDLE: // test if there is a response with a round robin priority |
---|
1377 | { |
---|
1378 | size_t ptr = r_xram_rsp_trt_index.read(); |
---|
1379 | size_t lines = TRANSACTION_TAB_LINES; |
---|
1380 | for(size_t i=0; i<lines; i++){ |
---|
1381 | size_t index=(i+ptr+1)%lines; |
---|
1382 | if(r_ixr_rsp_to_xram_rsp_rok[index]){ |
---|
1383 | r_xram_rsp_trt_index=index; |
---|
1384 | r_ixr_rsp_to_xram_rsp_rok[index]=false; |
---|
1385 | r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; |
---|
1386 | break; |
---|
1387 | } |
---|
1388 | } |
---|
1389 | break; |
---|
1390 | } |
---|
1391 | /////////////////////// |
---|
1392 | case XRAM_RSP_DIR_LOCK: // Take the lock on the directory |
---|
1393 | { |
---|
1394 | if( r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP ) { |
---|
1395 | r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; |
---|
1396 | } |
---|
1397 | break; |
---|
1398 | } |
---|
1399 | /////////////////////// |
---|
1400 | case XRAM_RSP_TRT_COPY: // Copy the TRT entry in the local buffer and eviction of a cache line |
---|
1401 | { |
---|
1402 | if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) { |
---|
1403 | size_t index = r_xram_rsp_trt_index.read(); |
---|
1404 | TransactionTabEntry trt_entry(m_transaction_tab.read(index)); |
---|
1405 | |
---|
1406 | r_xram_rsp_trt_buf.copy(trt_entry); // TRT entry local buffer |
---|
1407 | |
---|
1408 | // selects & extracts a victim line from cache |
---|
1409 | size_t way = 0; |
---|
1410 | size_t set = m_y[trt_entry.nline * m_words * 4]; |
---|
1411 | DirectoryEntry victim(m_cache_directory.select(set, way)); |
---|
1412 | |
---|
1413 | for (size_t i=0 ; i<m_words ; i++) r_xram_rsp_victim_data[i] = m_cache_data[way][set][i]; |
---|
1414 | |
---|
1415 | r_xram_rsp_victim_copies = victim.copies; |
---|
1416 | r_xram_rsp_victim_way = way; |
---|
1417 | r_xram_rsp_victim_set = set; |
---|
1418 | r_xram_rsp_victim_nline = victim.tag*m_sets + set; |
---|
1419 | r_xram_rsp_victim_inval = victim.valid && (victim.copies != 0); |
---|
1420 | r_xram_rsp_victim_dirty = victim.dirty; |
---|
1421 | |
---|
1422 | r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; |
---|
1423 | } |
---|
1424 | break; |
---|
1425 | } |
---|
1426 | /////////////////////// |
---|
1427 | case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) |
---|
1428 | { |
---|
1429 | // update data |
---|
1430 | size_t set = r_xram_rsp_victim_set.read(); |
---|
1431 | size_t way = r_xram_rsp_victim_way.read(); |
---|
1432 | for(size_t i=0; i<m_words ; i++){ |
---|
1433 | m_cache_data[way][set][i] = r_xram_rsp_trt_buf.wdata[i]; |
---|
1434 | } |
---|
1435 | // update directory |
---|
1436 | bool dirty = false; |
---|
1437 | for(size_t i=0; i<m_words;i++){ |
---|
1438 | dirty = dirty || (r_xram_rsp_trt_buf.wdata_be[i] != 0); |
---|
1439 | } |
---|
1440 | DirectoryEntry entry; |
---|
1441 | entry.valid = true; |
---|
1442 | entry.lock = false; |
---|
1443 | entry.dirty = dirty; |
---|
1444 | entry.tag = r_xram_rsp_trt_buf.nline / m_sets; |
---|
1445 | if(r_xram_rsp_trt_buf.proc_read) { |
---|
1446 | entry.copies = 0x1 << r_xram_rsp_trt_buf.srcid; |
---|
1447 | } else { |
---|
1448 | entry.copies = 0; |
---|
1449 | } |
---|
1450 | m_cache_directory.write(set, way, entry); |
---|
1451 | // If the victim is not dirty, we erase the entry in the TRT |
---|
1452 | if (!r_xram_rsp_victim_dirty.read()) m_transaction_tab.erase(r_xram_rsp_trt_index.read()); |
---|
1453 | // Next state |
---|
1454 | if ( r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; |
---|
1455 | else if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; |
---|
1456 | else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; |
---|
1457 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
---|
1458 | break; |
---|
1459 | } |
---|
1460 | //////////////////////// |
---|
1461 | case XRAM_RSP_TRT_DIRTY: // set the TRT entry (write line to XRAM) if the victim is dirty |
---|
1462 | { |
---|
1463 | if ( r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP ) { |
---|
1464 | m_transaction_tab.set(r_xram_rsp_trt_index.read(), |
---|
1465 | false, // write to XRAM |
---|
1466 | r_xram_rsp_victim_nline.read(), // line index |
---|
1467 | 0, |
---|
1468 | 0, |
---|
1469 | 0, |
---|
1470 | false, |
---|
1471 | false, |
---|
1472 | 0, |
---|
1473 | std::vector<be_t>(m_words,0), |
---|
1474 | std::vector<data_t>(m_words,0) ); |
---|
1475 | if ( r_xram_rsp_trt_buf.proc_read ) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; |
---|
1476 | else if ( r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; |
---|
1477 | else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
---|
1478 | } |
---|
1479 | break; |
---|
1480 | } |
---|
1481 | ////////////////////// |
---|
1482 | case XRAM_RSP_DIR_RSP: // send a request to TGT_RSP FSM in case of read |
---|
1483 | { |
---|
1484 | if ( !r_xram_rsp_to_tgt_rsp_req ) { |
---|
1485 | r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; |
---|
1486 | r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; |
---|
1487 | r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; |
---|
1488 | for (size_t i=0; i < m_words; i++) { |
---|
1489 | r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; |
---|
1490 | if( r_xram_rsp_trt_buf.single_word ) { |
---|
1491 | r_xram_rsp_to_tgt_rsp_val[i] = (r_xram_rsp_trt_buf.word_index == i); |
---|
1492 | } else { |
---|
1493 | r_xram_rsp_to_tgt_rsp_val[i] = true; |
---|
1494 | } |
---|
1495 | } |
---|
1496 | r_xram_rsp_to_tgt_rsp_req = true; |
---|
1497 | if ( r_xram_rsp_victim_inval ) r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; |
---|
1498 | else if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
---|
1499 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
---|
1500 | } |
---|
1501 | break; |
---|
1502 | } |
---|
1503 | /////////////////////// |
---|
1504 | case XRAM_RSP_UPT_LOCK: // Try to register the inval transaction in UPT |
---|
1505 | { |
---|
1506 | if ( r_alloc_upt_fsm == ALLOC_UPT_XRAM_RSP ) { |
---|
1507 | size_t index; |
---|
1508 | copy_t copies = r_xram_rsp_victim_copies.read(); |
---|
1509 | copy_t mask = 0x1; |
---|
1510 | size_t n=0; |
---|
1511 | for ( size_t i=0 ; i<32 ; i++) { |
---|
1512 | if ( copies & mask ) n++; |
---|
1513 | mask = (mask << 1); |
---|
1514 | } |
---|
1515 | bool wok = m_update_tab.set(false, // it's an inval transaction |
---|
1516 | 0, |
---|
1517 | 0, |
---|
1518 | 0, |
---|
1519 | n, |
---|
1520 | index); |
---|
1521 | if ( wok ) { |
---|
1522 | r_xram_rsp_upt_index = index; |
---|
1523 | r_xram_rsp_fsm = XRAM_RSP_INVAL; |
---|
1524 | } else { |
---|
1525 | r_xram_rsp_fsm = XRAM_RSP_WAIT; |
---|
1526 | } |
---|
1527 | } |
---|
1528 | break; |
---|
1529 | } |
---|
1530 | /////////////////// |
---|
1531 | case XRAM_RSP_WAIT: // releases UPT lock for one cycle |
---|
1532 | { |
---|
1533 | r_xram_rsp_fsm = XRAM_RSP_UPT_LOCK; |
---|
1534 | } |
---|
1535 | //////////////////// |
---|
1536 | case XRAM_RSP_INVAL: // send invalidate request to INIT_CMD FSM |
---|
1537 | { |
---|
1538 | if( !r_xram_rsp_to_init_cmd_req ) { |
---|
1539 | r_xram_rsp_to_init_cmd_req = true; |
---|
1540 | r_xram_rsp_to_init_cmd_nline = r_xram_rsp_victim_nline.read(); |
---|
1541 | r_xram_rsp_to_init_cmd_trdid = r_xram_rsp_upt_index; |
---|
1542 | r_xram_rsp_to_init_cmd_copies = r_xram_rsp_victim_copies; |
---|
1543 | if ( r_xram_rsp_victim_dirty ) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; |
---|
1544 | else r_xram_rsp_fsm = XRAM_RSP_IDLE; |
---|
1545 | } |
---|
1546 | break; |
---|
1547 | } |
---|
1548 | ////////////////////////// |
---|
1549 | case XRAM_RSP_WRITE_DIRTY: // send a write request to XRAM_CMD FSM |
---|
1550 | { |
---|
1551 | if ( !r_xram_rsp_to_xram_cmd_req ) { |
---|
1552 | r_xram_rsp_to_xram_cmd_req = true; |
---|
1553 | r_xram_rsp_to_xram_cmd_nline = r_xram_rsp_victim_nline.read(); |
---|
1554 | r_xram_rsp_to_xram_cmd_trdid = r_xram_rsp_trt_index.read(); |
---|
1555 | for(size_t i=0; i<m_words ; i++) { |
---|
1556 | r_xram_rsp_to_xram_cmd_data[i] = r_xram_rsp_victim_data[i]; |
---|
1557 | } |
---|
1558 | m_cpt_write_dirty++; |
---|
1559 | r_xram_rsp_fsm = XRAM_RSP_IDLE; |
---|
1560 | } |
---|
1561 | break; |
---|
1562 | } |
---|
1563 | } // end swich r_xram_rsp_fsm |
---|
1564 | |
---|
1565 | //////////////////////////////////////////////////////////////////////////////////// |
---|
1566 | // CLEANUP FSM |
---|
1567 | //////////////////////////////////////////////////////////////////////////////////// |
---|
1568 | // The CLEANUP FSM handles the cleanup request from L1 caches. |
---|
1569 | // It accesses the cache directory to update the list of copies. |
---|
1570 | // |
---|
1571 | // !!!!!!!! The actual cleanup of the cache directory has to be done... |
---|
1572 | // |
---|
1573 | //////////////////////////////////////////////////////////////////////////////////// |
---|
1574 | |
---|
1575 | switch ( r_cleanup_fsm.read() ) { |
---|
1576 | |
---|
1577 | /////////////////// |
---|
1578 | case CLEANUP_IDLE: |
---|
1579 | { |
---|
1580 | if ( m_cmd_cleanup_nline_fifo.rok() ) { |
---|
1581 | m_cpt_cleanup++; |
---|
1582 | cmd_cleanup_fifo_get = true; |
---|
1583 | r_cleanup_nline = m_cmd_cleanup_nline_fifo.read(); |
---|
1584 | r_cleanup_srcid = m_cmd_cleanup_srcid_fifo.read(); |
---|
1585 | r_cleanup_trdid = m_cmd_cleanup_trdid_fifo.read(); |
---|
1586 | r_cleanup_pktid = m_cmd_cleanup_pktid_fifo.read(); |
---|
1587 | |
---|
1588 | r_cleanup_fsm = CLEANUP_DIR_LOCK; |
---|
1589 | } |
---|
1590 | break; |
---|
1591 | } |
---|
1592 | ////////////////////// |
---|
1593 | case CLEANUP_DIR_LOCK: |
---|
1594 | { |
---|
1595 | if ( r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP ) { |
---|
1596 | |
---|
1597 | // Read the directory |
---|
1598 | size_t way = 0; |
---|
1599 | DirectoryEntry entry = m_cache_directory.read(r_cleanup_nline.read() << (m_words +2) , way); |
---|
1600 | |
---|
1601 | r_cleanup_dirty = entry.dirty; |
---|
1602 | r_cleanup_tag = entry.tag; |
---|
1603 | r_cleanup_lock = entry.lock; |
---|
1604 | r_cleanup_way = way; |
---|
1605 | r_cleanup_copies = entry.copies & ~(0x1 << r_cleanup_srcid.read()); |
---|
1606 | |
---|
1607 | // In case of hit, the copy must be cleaned in the copies bit-vector |
---|
1608 | if( entry.valid ) { |
---|
1609 | r_cleanup_fsm = CLEANUP_DIR_WRITE; |
---|
1610 | } else { |
---|
1611 | r_cleanup_fsm = CLEANUP_RSP; |
---|
1612 | } |
---|
1613 | } |
---|
1614 | break; |
---|
1615 | } |
---|
1616 | /////////////////////// |
---|
1617 | case CLEANUP_DIR_WRITE: |
---|
1618 | { |
---|
1619 | size_t way = r_cleanup_way.read(); |
---|
1620 | #define L2 soclib::common::uint32_log2 |
---|
1621 | size_t set = m_y[r_cleanup_nline.read() << (L2(m_words) +2)]; |
---|
1622 | #undef L2 |
---|
1623 | |
---|
1624 | // update the cache directory (for the copies) |
---|
1625 | DirectoryEntry entry; |
---|
1626 | entry.valid = true; |
---|
1627 | entry.dirty = r_cleanup_dirty.read(); |
---|
1628 | entry.tag = r_cleanup_tag.read(); |
---|
1629 | entry.lock = r_cleanup_lock.read(); |
---|
1630 | entry.copies = r_cleanup_copies.read(); |
---|
1631 | m_cache_directory.write(set, way, entry); |
---|
1632 | |
---|
1633 | // response to the cache |
---|
1634 | r_cleanup_fsm = CLEANUP_RSP; |
---|
1635 | |
---|
1636 | break; |
---|
1637 | } |
---|
1638 | ///////////////// |
---|
1639 | case CLEANUP_RSP: |
---|
1640 | { |
---|
1641 | if ( !r_cleanup_to_tgt_rsp_req ) { |
---|
1642 | r_cleanup_to_tgt_rsp_req = true; |
---|
1643 | r_cleanup_to_tgt_rsp_srcid = r_cleanup_srcid.read(); |
---|
1644 | r_cleanup_to_tgt_rsp_trdid = r_cleanup_trdid.read(); |
---|
1645 | r_cleanup_to_tgt_rsp_pktid = r_cleanup_pktid.read(); |
---|
1646 | r_cleanup_fsm = CLEANUP_IDLE; |
---|
1647 | } |
---|
1648 | break; |
---|
1649 | } |
---|
1650 | } // end switch cleanup fsm |
---|
1651 | |
---|
1652 | //////////////////////////////////////////////////////////////////////////////////// |
---|
1653 | // LLSC FSM |
---|
1654 | //////////////////////////////////////////////////////////////////////////////////// |
---|
1655 | // The LLSC FSM handles the LL & SC atomic access. |
---|
1656 | // |
---|
1657 | // For a LL : |
---|
// It accesses the directory to check hit / miss.
---|
1659 | // - In case of hit, the LL request is registered in the Atomic Table and the |
---|
1660 | // response is sent to the requesting processor. |
---|
1661 | // - In case of miss, the LLSC FSM accesses the transaction table. |
---|
1662 | // If a read transaction to the XRAM for this line already exists, |
---|
1663 | // or if the transaction table is full, it returns to IDLE state. |
---|
1664 | // Otherwise, a new transaction to the XRAM is initiated. |
---|
1665 | // In both cases, the LL request is not consumed in the FIFO. |
---|
1666 | // |
---|
1667 | // For a SC : |
---|
// It accesses the directory to check hit / miss.
// - In case of hit, the Atomic Table is checked and the proper response
//   (true or false) is sent to the requesting processor.
---|
1671 | // - In case of miss, the LLSC FSM accesses the transaction table. |
---|
1672 | // If a read transaction to the XRAM for this line already exists, |
---|
1673 | // or if the transaction table is full, it returns to IDLE state. |
---|
1674 | // Otherwise, a new transaction to the XRAM is initiated. |
---|
1675 | // In both cases, the SC request is not consumed in the FIFO. |
---|
1676 | ///////////////////////////////////////////////////////////////////// |
---|
1677 | |
---|
    switch ( r_llsc_fsm.read() ) {

      ///////////////
      case LLSC_IDLE:           // test LL / SC : dispatch on the sc flag of the command
      {
        if( m_cmd_llsc_addr_fifo.rok() ) {
          if(m_cmd_llsc_sc_fifo.read()){
            m_cpt_sc++;
            r_llsc_fsm = SC_DIR_LOCK;
          }
          else{
            m_cpt_ll++;
            r_llsc_fsm = LL_DIR_LOCK;
          }
        }
        break;
      }
      /////////////////
      case LL_DIR_LOCK: // check directory for hit / miss (requires the DIR lock)
      {
        if( r_alloc_dir_fsm.read() == ALLOC_DIR_LLSC ) {
          size_t way = 0;
          DirectoryEntry entry(m_cache_directory.read(m_cmd_llsc_addr_fifo.read(), way));
          r_llsc_dirty  = entry.dirty;
          r_llsc_tag    = entry.tag;
          r_llsc_way    = way;
          // add the requesting L1 cache to the copies bit-vector
          r_llsc_copies = entry.copies | (0x1 << m_cmd_llsc_srcid_fifo.read());

          if ( entry.valid )  r_llsc_fsm = LL_DIR_HIT;
          else                r_llsc_fsm = LLSC_TRT_LOCK;
        }
        break;
      }
      ////////////////
      case LL_DIR_HIT:  // read hit : update the memory cache
      {
        size_t way  = r_llsc_way.read();
        size_t set  = m_y[m_cmd_llsc_addr_fifo.read()];
        size_t word = m_x[m_cmd_llsc_addr_fifo.read()];

        // update directory (lock bit & copies)
        DirectoryEntry entry;
        entry.valid  = true;
        entry.dirty  = r_llsc_dirty.read();
        entry.lock   = true;
        entry.tag    = r_llsc_tag.read();
        entry.copies = r_llsc_copies.read();
        m_cache_directory.write(set, way, entry);

        // read data in cache
        r_llsc_data = m_cache_data[way][set][word];

        // set Atomic Table : registers the (srcid, address) reservation
        m_atomic_tab.set(m_cmd_llsc_srcid_fifo.read(), m_cmd_llsc_addr_fifo.read());

        r_llsc_fsm = LL_RSP;
        break;
      }
      ////////////
      case LL_RSP:      // request the TGT_RSP FSM to return data
      {
        if ( !r_llsc_to_tgt_rsp_req ) {
          // the LL command is consumed only when the response can be posted
          cmd_llsc_fifo_get       = true;
          r_llsc_to_tgt_rsp_data  = r_llsc_data.read();
          r_llsc_to_tgt_rsp_srcid = m_cmd_llsc_srcid_fifo.read();
          r_llsc_to_tgt_rsp_trdid = m_cmd_llsc_trdid_fifo.read();
          r_llsc_to_tgt_rsp_pktid = m_cmd_llsc_pktid_fifo.read();
          r_llsc_to_tgt_rsp_req   = true;
          r_llsc_fsm = LLSC_IDLE;
        }
        break;
      }
      /////////////////
      case SC_DIR_LOCK: // SC : check the reservation in the Atomic Table and the directory
      {
        if( r_alloc_dir_fsm.read() == ALLOC_DIR_LLSC ) {
          size_t way = 0;
          DirectoryEntry entry(m_cache_directory.read(m_cmd_llsc_addr_fifo.read(), way));
          bool ok = m_atomic_tab.isatomic(m_cmd_llsc_srcid_fifo.read(),m_cmd_llsc_addr_fifo.read());
          if( ok ) {    // reservation still valid for this processor
            r_llsc_dirty  = entry.dirty;
            r_llsc_tag    = entry.tag;
            r_llsc_way    = way;
            r_llsc_copies = entry.copies;
            if ( entry.valid )  r_llsc_fsm = SC_DIR_HIT;
            else                r_llsc_fsm = LLSC_TRT_LOCK;
          } else {      // reservation lost : the SC fails
            r_llsc_fsm = SC_RSP_FALSE;
          }
        }
        break;
      }
      ////////////////
      case SC_DIR_HIT:  // SC success on a valid line : write the data in cache
      {
        size_t way  = r_llsc_way.read();
        size_t set  = m_y[m_cmd_llsc_addr_fifo.read()];
        size_t word = m_x[m_cmd_llsc_addr_fifo.read()];

        // update directory (lock & dirty bits)
        DirectoryEntry entry;
        entry.valid  = true;
        entry.dirty  = true;
        entry.lock   = true;
        entry.tag    = r_llsc_tag.read();
        entry.copies = r_llsc_copies.read();
        m_cache_directory.write(set, way, entry);

        // write data in cache
        m_cache_data[way][set][word] = m_cmd_llsc_wdata_fifo.read();

        // reset Atomic Table : any other pending reservation on this address is lost
        m_atomic_tab.reset(m_cmd_llsc_addr_fifo.read());

        r_llsc_fsm = SC_RSP_TRUE;
        break;
      }
      //////////////////
      case SC_RSP_FALSE:    // send a failure response (data = 1) to the processor
      {
        if( !r_llsc_to_tgt_rsp_req ) {
          cmd_llsc_fifo_get       = true;
          r_llsc_to_tgt_rsp_req   = true;
          r_llsc_to_tgt_rsp_data  = 1;      // non-zero signals SC failure
          r_llsc_to_tgt_rsp_srcid = m_cmd_llsc_srcid_fifo.read();
          r_llsc_to_tgt_rsp_trdid = m_cmd_llsc_trdid_fifo.read();
          r_llsc_to_tgt_rsp_pktid = m_cmd_llsc_pktid_fifo.read();
          r_llsc_fsm = LLSC_IDLE;
        }
        break;
      }
      /////////////////
      case SC_RSP_TRUE:     // send a success response (data = 0) to the processor
      {
        if( !r_llsc_to_tgt_rsp_req ) {
          cmd_llsc_fifo_get       = true;
          r_llsc_to_tgt_rsp_req   = true;
          r_llsc_to_tgt_rsp_data  = 0;      // zero signals SC success
          r_llsc_to_tgt_rsp_srcid = m_cmd_llsc_srcid_fifo.read();
          r_llsc_to_tgt_rsp_trdid = m_cmd_llsc_trdid_fifo.read();
          r_llsc_to_tgt_rsp_pktid = m_cmd_llsc_pktid_fifo.read();
          r_llsc_fsm = LLSC_IDLE;
        }
        break;
      }
      ///////////////////
      case LLSC_TRT_LOCK:   // read or write miss : check the Transaction Table
      {
        if( r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC ) {
          size_t index = 0;
          bool hit = m_transaction_tab.hit_read(m_nline[m_cmd_llsc_addr_fifo.read()],index);
          bool wok = !m_transaction_tab.full(index);

          if ( hit || !wok ) {  // missing line already requested or no space in TRT :
                                // return to IDLE without consuming the request (retry later)
            r_llsc_fsm = LLSC_IDLE;
          } else {
            r_llsc_trt_index = index;
            r_llsc_fsm = LLSC_TRT_SET;
          }
        }
        break;
      }
      //////////////////
      case LLSC_TRT_SET:    // register the XRAM transaction in Transaction Table
      {
        if( r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC ) {
          m_transaction_tab.set(r_llsc_trt_index.read(),
                                true,                                 // read from XRAM
                                m_nline[m_cmd_llsc_addr_fifo.read()], // line index
                                m_cmd_llsc_srcid_fifo.read(),
                                m_cmd_llsc_trdid_fifo.read(),
                                m_cmd_llsc_pktid_fifo.read(),
                                false,
                                false,
                                0,
                                std::vector<be_t>(m_words,0),
                                std::vector<data_t>(m_words,0));
          r_llsc_fsm = LLSC_XRAM_REQ;
        }
        break;
      }
      ///////////////////
      case LLSC_XRAM_REQ:   // request the XRAM_CMD FSM to fetch the missing line
      {
        if ( !r_llsc_to_xram_cmd_req ) {
          r_llsc_to_xram_cmd_req   = true;
          r_llsc_to_xram_cmd_trdid = r_llsc_trt_index.read();
          r_llsc_to_xram_cmd_nline = m_nline[m_cmd_llsc_addr_fifo.read()];
          if( m_cmd_llsc_sc_fifo.read() ) {
            // an SC miss fails immediately (line will be fetched for a later retry)
            r_llsc_fsm = SC_RSP_FALSE;
          } else {
            // an LL miss is simply dropped and retried by the processor
            cmd_llsc_fifo_get = true;
            r_llsc_fsm = LLSC_IDLE;
          }
        }
        break;
      }
    } // end switch r_llsc_fsm
---|
1876 | |
---|
1877 | |
---|
1878 | ////////////////////////////////////////////////////////////////////////////// |
---|
1879 | // INIT_CMD FSM |
---|
1880 | ////////////////////////////////////////////////////////////////////////////// |
---|
1881 | // The INIT_CMD fsm controls the VCI CMD initiator port, used to update |
---|
1882 | // or invalidate cache lines in L1 caches. |
---|
1883 | // It implements a round-robin priority between the two following requests: |
---|
1884 | // - r_write_to_init_cmd_req : update request from WRITE FSM |
---|
1885 | // - r_xram_rsp_to_init_cmd_req : invalidate request from XRAM_RSP FSM |
---|
1886 | // The inval request is a single cell VCI write command containing the |
---|
1887 | // index of the line to be invalidated. |
---|
1888 | // The update request is a multi-cells VCI write command : The first cell |
---|
1889 | // contains the index of the cache line to be updated. The second cell contains |
---|
1890 | // the index of the first modified word in the line. The following cells |
---|
1891 | // contain the data. |
---|
1892 | /////////////////////////////////////////////////////////////////////////////// |
---|
1893 | |
---|
    switch ( r_init_cmd_fsm.read() ) {

      ////////////////////////
      case INIT_CMD_UPDT_IDLE:  // Invalidate requests have highest priority
      {
        if ( r_xram_rsp_to_init_cmd_req ) {
          r_init_cmd_fsm = INIT_CMD_INVAL_SEL;
          m_cpt_inval++;
        } else if ( r_write_to_init_cmd_req ) {
          r_init_cmd_fsm = INIT_CMD_UPDT_SEL;
          m_cpt_update++;
        }
        break;
      }
      /////////////////////////
      case INIT_CMD_INVAL_IDLE: // Update requests have highest priority
      {
        if ( r_write_to_init_cmd_req ) {
          r_init_cmd_fsm = INIT_CMD_UPDT_SEL;
          m_cpt_update++;
        } else if ( r_xram_rsp_to_init_cmd_req ) {
          r_init_cmd_fsm = INIT_CMD_INVAL_SEL;
          m_cpt_inval++;
        }
        break;
      }
      ////////////////////////
      case INIT_CMD_INVAL_SEL:  // selects the next L1 cache to invalidate
      {

        if (r_xram_rsp_to_init_cmd_copies.read() == 0) {  // no more copies :
                                                          // the inval request is completed
          r_xram_rsp_to_init_cmd_req = false;
          r_init_cmd_fsm = INIT_CMD_INVAL_IDLE;
        } else {                                          // select the first target :
                                                          // lowest set bit in the copies vector
          copy_t copies = r_xram_rsp_to_init_cmd_copies.read();
          copy_t mask   = 0x1;
          for ( size_t i=0 ; i<8*sizeof(copy_t) ; i++ ) {
            if ( copies & mask ) {
              r_init_cmd_target = i;
              break;
            }
            mask = mask << 1;
          } // end for
          m_cpt_inval_mult++;
          r_init_cmd_fsm = INIT_CMD_INVAL_NLINE;
          // clear the selected copy so the next iteration picks the following one
          r_xram_rsp_to_init_cmd_copies = copies & ~mask;
        }
        break;
      }
      ////////////////////////
      case INIT_CMD_INVAL_NLINE:  // send the cache line index (single-cell inval command)
      {
        if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_INVAL_SEL;
        break;
      }
      ///////////////////////
      case INIT_CMD_UPDT_SEL:   // selects the next L1 cache to update
      {
        if (r_write_to_init_cmd_copies.read() == 0) {     // no more copies :
                                                          // the update request is completed
          r_write_to_init_cmd_req = false;
          r_init_cmd_fsm = INIT_CMD_UPDT_IDLE;
        } else {                                          // select the first target :
                                                          // lowest set bit in the copies vector
          copy_t copies = r_write_to_init_cmd_copies.read();
          copy_t mask   = 0x1;
          for ( size_t i=0 ; i<8*sizeof(copy_t) ; i++ ) {
            if ( copies & mask ) {
              r_init_cmd_target = i;
              break;
            }
            mask = mask << 1;
          } // end for
          r_init_cmd_fsm = INIT_CMD_UPDT_NLINE;
          // clear the selected copy so the next iteration picks the following one
          r_write_to_init_cmd_copies = copies & ~mask;
          r_init_cmd_cpt = 0;
          m_cpt_update_mult++;
        }
        break;
      }
      /////////////////////////
      case INIT_CMD_UPDT_NLINE: // send the cache line index (first cell of the update command)
      {
        if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_INDEX;
        break;
      }
      /////////////////////////
      case INIT_CMD_UPDT_INDEX: // send the first modified word index (second cell)
      {
        if ( p_vci_ini.cmdack ) r_init_cmd_fsm = INIT_CMD_UPDT_DATA;
        break;
      }
      ////////////////////////
      case INIT_CMD_UPDT_DATA:  // send the data cells, one per accepted cycle
      {
        if ( p_vci_ini.cmdack ) {
          if ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ) {
            // last data cell sent : go select the next target
            r_init_cmd_fsm = INIT_CMD_UPDT_SEL;
          } else {
            r_init_cmd_cpt = r_init_cmd_cpt.read() + 1;
          }
        }
        break;
      }
    } // end switch r_init_cmd_fsm
---|
1997 | |
---|
1998 | ///////////////////////////////////////////////////////////////////// |
---|
1999 | // TGT_RSP FSM |
---|
2000 | ///////////////////////////////////////////////////////////////////// |
---|
2001 | // The TGT_RSP fsm sends the responses on the VCI target port |
---|
2002 | // with a round robin priority between six requests : |
---|
2003 | // - r_read_to_tgt_rsp_req |
---|
2004 | // - r_write_to_tgt_rsp_req |
---|
2005 | // - r_llsc_to_tgt_rsp_req |
---|
2006 | // - r_cleanup_to_tgt_rsp_req |
---|
2007 | // - r_init_rsp_to_tgt_rsp_req |
---|
2008 | // - r_xram_rsp_to_tgt_rsp_req |
---|
2009 | // The ordering is : read > write > llsc > cleanup > xram > init |
---|
2010 | ///////////////////////////////////////////////////////////////////// |
---|
2011 | |
---|
2012 | switch ( r_tgt_rsp_fsm.read() ) { |
---|
2013 | |
---|
2014 | /////////////////////// |
---|
2015 | case TGT_RSP_READ_IDLE: // write requests have the highest priority |
---|
2016 | { |
---|
2017 | if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; |
---|
2018 | else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; |
---|
2019 | else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; |
---|
2020 | else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; |
---|
2021 | else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; |
---|
2022 | else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; |
---|
2023 | break; |
---|
2024 | } |
---|
2025 | //////////////////////// |
---|
2026 | case TGT_RSP_WRITE_IDLE: // llsc requests have the highest priority |
---|
2027 | { |
---|
2028 | if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; |
---|
2029 | else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; |
---|
2030 | else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; |
---|
2031 | else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; |
---|
2032 | else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; |
---|
2033 | else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; |
---|
2034 | break; |
---|
2035 | } |
---|
2036 | /////////////////////// |
---|
2037 | case TGT_RSP_LLSC_IDLE: // cleanup requests have the highest priority |
---|
2038 | { |
---|
2039 | if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; |
---|
2040 | else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; |
---|
2041 | else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; |
---|
2042 | else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; |
---|
2043 | else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; |
---|
2044 | else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; |
---|
2045 | break; |
---|
2046 | } |
---|
2047 | ////////////////////////// |
---|
2048 | case TGT_RSP_CLEANUP_IDLE: // xram requests have the highest priority |
---|
2049 | { |
---|
2050 | if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; |
---|
2051 | else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; |
---|
2052 | else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; |
---|
2053 | else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; |
---|
2054 | else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; |
---|
2055 | else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; |
---|
2056 | break; |
---|
2057 | } |
---|
2058 | /////////////////////// |
---|
2059 | case TGT_RSP_XRAM_IDLE: // init requests have the highest priority |
---|
2060 | { |
---|
2061 | if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; |
---|
2062 | else if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; |
---|
2063 | else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; |
---|
2064 | else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; |
---|
2065 | else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; |
---|
2066 | else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; |
---|
2067 | break; |
---|
2068 | } |
---|
2069 | /////////////////////// |
---|
2070 | case TGT_RSP_INIT_IDLE: // read requests have the highest priority |
---|
2071 | { |
---|
2072 | if ( r_read_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_READ_TEST; |
---|
2073 | else if ( r_write_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_WRITE; |
---|
2074 | else if ( r_llsc_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_LLSC; |
---|
2075 | else if ( r_cleanup_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_CLEANUP; |
---|
2076 | else if ( r_xram_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_XRAM_TEST; |
---|
2077 | else if ( r_init_rsp_to_tgt_rsp_req ) r_tgt_rsp_fsm = TGT_RSP_INIT; |
---|
2078 | break; |
---|
2079 | } |
---|
2080 | /////////////////////// |
---|
2081 | case TGT_RSP_READ_TEST: // test if word or cache line |
---|
2082 | { |
---|
2083 | bool line = true; |
---|
2084 | size_t index; |
---|
2085 | for ( size_t i=0; i< m_words ; i++ ) { |
---|
2086 | line = line && r_read_to_tgt_rsp_val[i]; |
---|
2087 | if ( r_read_to_tgt_rsp_val[i] ) index = i; |
---|
2088 | } |
---|
2089 | if ( line ) { |
---|
2090 | r_tgt_rsp_cpt = 0; |
---|
2091 | r_tgt_rsp_fsm = TGT_RSP_READ_LINE; |
---|
2092 | } else { |
---|
2093 | r_tgt_rsp_cpt = index; |
---|
2094 | r_tgt_rsp_fsm = TGT_RSP_READ_WORD; |
---|
2095 | } |
---|
2096 | break; |
---|
2097 | } |
---|
2098 | /////////////////////// |
---|
2099 | case TGT_RSP_READ_WORD: // send one word response |
---|
2100 | { |
---|
2101 | if ( p_vci_tgt.rspack ) { |
---|
2102 | r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; |
---|
2103 | r_read_to_tgt_rsp_req = false; |
---|
2104 | } |
---|
2105 | break; |
---|
2106 | } |
---|
2107 | /////////////////////// |
---|
2108 | case TGT_RSP_READ_LINE: // send one complete cache line |
---|
2109 | { |
---|
2110 | if ( p_vci_tgt.rspack ) { |
---|
2111 | if ( r_tgt_rsp_cpt.read() == (m_words-1) ) { |
---|
2112 | r_tgt_rsp_fsm = TGT_RSP_READ_IDLE; |
---|
2113 | r_read_to_tgt_rsp_req = false; |
---|
2114 | } else { |
---|
2115 | r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; |
---|
2116 | } |
---|
2117 | } |
---|
2118 | break; |
---|
2119 | } |
---|
2120 | /////////////////// |
---|
2121 | case TGT_RSP_WRITE: // send the write acknowledge |
---|
2122 | { |
---|
2123 | if ( p_vci_tgt.rspack ) { |
---|
2124 | r_tgt_rsp_fsm = TGT_RSP_WRITE_IDLE; |
---|
2125 | r_write_to_tgt_rsp_req = false; |
---|
2126 | } |
---|
2127 | break; |
---|
2128 | } |
---|
2129 | ////////////////// |
---|
2130 | case TGT_RSP_LLSC: // send one atomic word response |
---|
2131 | { |
---|
2132 | if ( p_vci_tgt.rspack ) { |
---|
2133 | r_tgt_rsp_fsm = TGT_RSP_LLSC_IDLE; |
---|
2134 | r_llsc_to_tgt_rsp_req = false; |
---|
2135 | } |
---|
2136 | break; |
---|
2137 | } |
---|
2138 | ///////////////////// |
---|
2139 | case TGT_RSP_CLEANUP: // send the cleanup acknowledge |
---|
2140 | { |
---|
2141 | if ( p_vci_tgt.rspack ) { |
---|
2142 | r_tgt_rsp_fsm = TGT_RSP_CLEANUP_IDLE; |
---|
2143 | r_cleanup_to_tgt_rsp_req = false; |
---|
2144 | } |
---|
2145 | break; |
---|
2146 | } |
---|
2147 | /////////////////////// |
---|
2148 | case TGT_RSP_XRAM_TEST: // test if word or cache line |
---|
2149 | { |
---|
2150 | bool line = true; |
---|
2151 | size_t index; |
---|
2152 | for ( size_t i=0; i< m_words ; i++ ) { |
---|
2153 | line = line && r_xram_rsp_to_tgt_rsp_val[i]; |
---|
2154 | if ( r_xram_rsp_to_tgt_rsp_val[i] ) index = i; |
---|
2155 | } |
---|
2156 | if ( line ) { |
---|
2157 | r_tgt_rsp_cpt = 0; |
---|
2158 | r_tgt_rsp_fsm = TGT_RSP_XRAM_LINE; |
---|
2159 | } else { |
---|
2160 | r_tgt_rsp_cpt = index; |
---|
2161 | r_tgt_rsp_fsm = TGT_RSP_XRAM_WORD; |
---|
2162 | } |
---|
2163 | break; |
---|
2164 | } |
---|
2165 | /////////////////////// |
---|
2166 | case TGT_RSP_XRAM_WORD: // send one word response |
---|
2167 | { |
---|
2168 | if ( p_vci_tgt.rspack ) { |
---|
2169 | r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; |
---|
2170 | r_xram_rsp_to_tgt_rsp_req = false; |
---|
2171 | } |
---|
2172 | break; |
---|
2173 | } |
---|
2174 | /////////////////////// |
---|
2175 | case TGT_RSP_XRAM_LINE: // send one complete cache line |
---|
2176 | { |
---|
2177 | if ( p_vci_tgt.rspack ) { |
---|
2178 | if ( r_tgt_rsp_cpt.read() == (m_words-1) ) { |
---|
2179 | r_tgt_rsp_fsm = TGT_RSP_XRAM_IDLE; |
---|
2180 | r_xram_rsp_to_tgt_rsp_req = false; |
---|
2181 | } else { |
---|
2182 | r_tgt_rsp_cpt = r_tgt_rsp_cpt.read() + 1; |
---|
2183 | } |
---|
2184 | } |
---|
2185 | break; |
---|
2186 | } |
---|
2187 | /////////////////// |
---|
2188 | case TGT_RSP_INIT: // send the pending write acknowledge |
---|
2189 | { |
---|
2190 | if ( p_vci_tgt.rspack ) { |
---|
2191 | r_tgt_rsp_fsm = TGT_RSP_INIT_IDLE; |
---|
2192 | r_init_rsp_to_tgt_rsp_req = false; |
---|
2193 | } |
---|
2194 | break; |
---|
2195 | } |
---|
2196 | } // end switch tgt_rsp_fsm |
---|
2197 | |
---|
2198 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2199 | // ALLOC_UPT FSM |
---|
2200 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2201 | // The ALLOC_UPT FSM allocates the access to the Update/Inval Table (UPT). |
---|
2202 | // with a round robin priority between three FSMs : INIT_RSP > WRITE > XRAM_RSP |
---|
2203 | // - The WRITE FSM initiates update transactions and sets new entry in UPT. |
---|
2204 | // - The XRAM_RSP FSM initiates inval transactions and sets new entry in UPT. |
---|
// - The INIT_RSP FSM completes those transactions and erases the UPT entry.
// The resource is always allocated.
---|
2207 | ///////////////////////////////////////////////////////////////////////////////////// |
---|
2208 | |
---|
    switch ( r_alloc_upt_fsm.read() ) {

      ////////////////////////
      case ALLOC_UPT_INIT_RSP:    // UPT allocated to INIT_RSP FSM :
                                  // release it only when INIT_RSP no longer needs the lock
        if ( (r_init_rsp_fsm.read() != INIT_RSP_UPT_LOCK) &&
             (r_init_rsp_fsm.read() != INIT_RSP_UPT_CLEAR) )
        {
          // round-robin : WRITE has priority over XRAM_RSP after INIT_RSP
          if      (r_write_fsm.read()==WRITE_UPT_LOCK)        r_alloc_upt_fsm = ALLOC_UPT_WRITE;
          else if (r_xram_rsp_fsm.read()==XRAM_RSP_UPT_LOCK)  r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP;
        }
        break;

      /////////////////////
      case ALLOC_UPT_WRITE:       // UPT allocated to WRITE FSM
        if ( r_write_fsm.read() != WRITE_UPT_LOCK )
        {
          // round-robin : XRAM_RSP has priority over INIT_RSP after WRITE
          if      (r_xram_rsp_fsm.read()==XRAM_RSP_UPT_LOCK)  r_alloc_upt_fsm = ALLOC_UPT_XRAM_RSP;
          else if (r_init_rsp_fsm.read()==INIT_RSP_UPT_LOCK)  r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP;
        }
        break;

      ////////////////////////
      case ALLOC_UPT_XRAM_RSP:    // UPT allocated to XRAM_RSP FSM
        if ( r_xram_rsp_fsm.read() != XRAM_RSP_UPT_LOCK )
        {
          // round-robin : INIT_RSP has priority over WRITE after XRAM_RSP
          if      (r_init_rsp_fsm.read()==INIT_RSP_UPT_LOCK)  r_alloc_upt_fsm = ALLOC_UPT_INIT_RSP;
          else if (r_write_fsm.read()==WRITE_UPT_LOCK)        r_alloc_upt_fsm = ALLOC_UPT_WRITE;
        }
        break;

    } // end switch r_alloc_upt_fsm
---|
2240 | |
---|
2241 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2242 | // ALLOC_DIR FSM |
---|
2243 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2244 | // The ALLOC_DIR FSM allocates the access to the directory and |
---|
2245 | // the data cache with a round robin priority between 5 user FSMs : |
---|
2246 | // The cyclic ordering is READ > WRITE > LLSC > CLEANUP > XRAM_RSP |
---|
2247 | // The resource is always allocated. |
---|
2248 | ///////////////////////////////////////////////////////////////////////////////////// |
---|
2249 | |
---|
2250 | switch ( r_alloc_dir_fsm.read() ) { |
---|
2251 | |
---|
2252 | //////////////////// |
---|
2253 | case ALLOC_DIR_READ: |
---|
2254 | if ( ( (r_read_fsm.read() != READ_DIR_LOCK) && |
---|
2255 | (r_read_fsm.read() != READ_TRT_LOCK) ) |
---|
2256 | || |
---|
2257 | ( (r_read_fsm.read() == READ_TRT_LOCK) && |
---|
2258 | (r_alloc_trt_fsm.read() == ALLOC_TRT_READ) ) ) |
---|
2259 | { |
---|
2260 | if (r_write_fsm.read()==WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; |
---|
2261 | else if ((r_llsc_fsm.read()==LL_DIR_LOCK) || |
---|
2262 | (r_llsc_fsm.read()==SC_DIR_LOCK)) r_alloc_dir_fsm = ALLOC_DIR_LLSC; |
---|
2263 | else if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; |
---|
2264 | else if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; |
---|
2265 | } |
---|
2266 | break; |
---|
2267 | |
---|
2268 | ///////////////////// |
---|
2269 | case ALLOC_DIR_WRITE: |
---|
2270 | if ( ( (r_write_fsm.read() != WRITE_DIR_LOCK) && |
---|
2271 | (r_write_fsm.read() != WRITE_TRT_LOCK) && |
---|
2272 | (r_write_fsm.read() != WRITE_DIR_HIT_READ) ) |
---|
2273 | || |
---|
2274 | ( (r_write_fsm.read() == WRITE_TRT_LOCK) && |
---|
2275 | (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) ) ) |
---|
2276 | { |
---|
2277 | if ((r_llsc_fsm.read()==LL_DIR_LOCK) || |
---|
2278 | (r_llsc_fsm.read()==SC_DIR_LOCK)) r_alloc_dir_fsm = ALLOC_DIR_LLSC; |
---|
2279 | else if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; |
---|
2280 | else if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; |
---|
2281 | else if (r_read_fsm.read()==READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; |
---|
2282 | } |
---|
2283 | break; |
---|
2284 | |
---|
2285 | //////////////////// |
---|
2286 | case ALLOC_DIR_LLSC: |
---|
2287 | if ( ( (r_llsc_fsm.read() != LL_DIR_LOCK) && |
---|
2288 | (r_llsc_fsm.read() != LL_DIR_HIT ) && |
---|
2289 | (r_llsc_fsm.read() != SC_DIR_LOCK) && |
---|
2290 | (r_llsc_fsm.read() != SC_DIR_HIT ) && |
---|
2291 | (r_llsc_fsm.read() != LLSC_TRT_LOCK ) ) |
---|
2292 | || |
---|
2293 | ( (r_llsc_fsm.read() == LLSC_TRT_LOCK ) && |
---|
2294 | (r_alloc_trt_fsm.read() == ALLOC_TRT_LLSC) ) ) |
---|
2295 | { |
---|
2296 | if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; |
---|
2297 | else if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; |
---|
2298 | else if (r_read_fsm.read()==READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; |
---|
2299 | else if (r_write_fsm.read()==WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; |
---|
2300 | } |
---|
2301 | break; |
---|
2302 | |
---|
2303 | /////////////////////// |
---|
2304 | case ALLOC_DIR_CLEANUP: |
---|
2305 | if ( (r_cleanup_fsm.read() != CLEANUP_DIR_LOCK) ) |
---|
2306 | { |
---|
2307 | if (r_xram_rsp_fsm.read()==XRAM_RSP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_XRAM_RSP; |
---|
2308 | else if (r_read_fsm.read()==READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; |
---|
2309 | else if (r_write_fsm.read()==WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; |
---|
2310 | else if ((r_llsc_fsm.read()==LL_DIR_LOCK) || |
---|
2311 | (r_llsc_fsm.read()==SC_DIR_LOCK)) r_alloc_dir_fsm = ALLOC_DIR_LLSC; |
---|
2312 | } |
---|
2313 | break; |
---|
2314 | //////////////////////// |
---|
2315 | case ALLOC_DIR_XRAM_RSP: |
---|
2316 | if ( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) && |
---|
2317 | (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) ) |
---|
2318 | { |
---|
2319 | if (r_read_fsm.read()==READ_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_READ; |
---|
2320 | else if (r_write_fsm.read()==WRITE_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_WRITE; |
---|
2321 | else if ((r_llsc_fsm.read()==LL_DIR_LOCK) || |
---|
2322 | (r_llsc_fsm.read()==SC_DIR_LOCK)) r_alloc_dir_fsm = ALLOC_DIR_LLSC; |
---|
2323 | else if (r_cleanup_fsm.read()==CLEANUP_DIR_LOCK) r_alloc_dir_fsm = ALLOC_DIR_CLEANUP; |
---|
2324 | } |
---|
2325 | break; |
---|
2326 | |
---|
2327 | } // end switch alloc_dir_fsm |
---|
2328 | |
---|
2329 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2330 | // ALLOC_TRT FSM |
---|
2331 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2332 | // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) |
---|
2333 | // with a round robin priority between 4 user FSMs : |
---|
2334 | // The cyclic priority is READ > WRITE > LLSC > XRAM_RSP |
---|
2335 | // The resource is always allocated. |
---|
2336 | /////////////////////////////////////////////////////////////////////////////////// |
---|
2337 | |
---|
2338 | switch (r_alloc_trt_fsm) { |
---|
2339 | |
---|
2340 | //////////////////// |
---|
2341 | case ALLOC_TRT_READ: |
---|
2342 | if ( r_read_fsm.read() != READ_TRT_LOCK ) |
---|
2343 | { |
---|
2344 | if (r_write_fsm.read()==WRITE_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_WRITE; |
---|
2345 | else if (r_llsc_fsm.read()==LLSC_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_LLSC; |
---|
2346 | else if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; |
---|
2347 | else if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) || |
---|
2348 | (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ)) r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; |
---|
2349 | } |
---|
2350 | break; |
---|
2351 | ///////////////////// |
---|
2352 | case ALLOC_TRT_WRITE: |
---|
2353 | if ( r_write_fsm.read() != WRITE_TRT_LOCK ) |
---|
2354 | { |
---|
2355 | if (r_llsc_fsm.read()==LLSC_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_LLSC; |
---|
2356 | else if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; |
---|
2357 | else if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) || |
---|
2358 | (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ)) r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; |
---|
2359 | else if (r_read_fsm.read()==READ_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_READ; |
---|
2360 | } |
---|
2361 | break; |
---|
2362 | //////////////////// |
---|
2363 | case ALLOC_TRT_LLSC: |
---|
2364 | if ( r_llsc_fsm.read() != LLSC_TRT_LOCK ) |
---|
2365 | { |
---|
2366 | if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; |
---|
2367 | else if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) || |
---|
2368 | (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ)) r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; |
---|
2369 | else if (r_read_fsm.read()==READ_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_READ; |
---|
2370 | else if (r_write_fsm.read()==WRITE_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_WRITE; |
---|
2371 | } |
---|
2372 | break; |
---|
2373 | //////////////////////// |
---|
2374 | case ALLOC_TRT_XRAM_RSP: |
---|
2375 | if ( (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) && |
---|
2376 | (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) ) { |
---|
2377 | if ((r_ixr_rsp_fsm.read()==IXR_RSP_TRT_ERASE) || |
---|
2378 | (r_ixr_rsp_fsm.read()==IXR_RSP_TRT_READ)) r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; |
---|
2379 | else if (r_read_fsm.read()==READ_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_READ; |
---|
2380 | else if (r_write_fsm.read()==WRITE_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_WRITE; |
---|
2381 | else if (r_llsc_fsm.read()==LLSC_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_LLSC; |
---|
2382 | } |
---|
2383 | break; |
---|
2384 | //////////////////////// |
---|
2385 | case ALLOC_TRT_IXR_RSP: |
---|
2386 | if ( (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) && |
---|
2387 | (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ) ) { |
---|
2388 | if (r_read_fsm.read()==READ_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_READ; |
---|
2389 | else if (r_write_fsm.read()==WRITE_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_WRITE; |
---|
2390 | else if (r_llsc_fsm.read()==LLSC_TRT_LOCK) r_alloc_trt_fsm = ALLOC_TRT_LLSC; |
---|
2391 | else if (r_xram_rsp_fsm.read()==XRAM_RSP_TRT_COPY) r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; |
---|
2392 | } |
---|
2393 | break; |
---|
2394 | |
---|
2395 | } // end switch alloc_trt_fsm |
---|
2396 | |
---|
2397 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2398 | // TGT_CMD to READ FIFO |
---|
2399 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2400 | |
---|
2401 | if ( cmd_read_fifo_put ) { |
---|
2402 | if ( cmd_read_fifo_get ) { |
---|
2403 | m_cmd_read_addr_fifo.put_and_get(p_vci_tgt.address.read()); |
---|
2404 | m_cmd_read_word_fifo.put_and_get((p_vci_tgt.plen.read() == 4)); |
---|
2405 | m_cmd_read_srcid_fifo.put_and_get(p_vci_tgt.srcid.read()); |
---|
2406 | m_cmd_read_trdid_fifo.put_and_get(p_vci_tgt.trdid.read()); |
---|
2407 | m_cmd_read_pktid_fifo.put_and_get(p_vci_tgt.pktid.read()); |
---|
2408 | } else { |
---|
2409 | m_cmd_read_addr_fifo.simple_put(p_vci_tgt.address.read()); |
---|
2410 | m_cmd_read_word_fifo.simple_put((p_vci_tgt.plen.read() == 4)); |
---|
2411 | m_cmd_read_srcid_fifo.simple_put(p_vci_tgt.srcid.read()); |
---|
2412 | m_cmd_read_trdid_fifo.simple_put(p_vci_tgt.trdid.read()); |
---|
2413 | m_cmd_read_pktid_fifo.simple_put(p_vci_tgt.pktid.read()); |
---|
2414 | } |
---|
2415 | } else { |
---|
2416 | if ( cmd_read_fifo_get ) { |
---|
2417 | m_cmd_read_addr_fifo.simple_get(); |
---|
2418 | m_cmd_read_word_fifo.simple_get(); |
---|
2419 | m_cmd_read_srcid_fifo.simple_get(); |
---|
2420 | m_cmd_read_trdid_fifo.simple_get(); |
---|
2421 | m_cmd_read_pktid_fifo.simple_get(); |
---|
2422 | } |
---|
2423 | } |
---|
2424 | ///////////////////////////////////////////////////////////////////// |
---|
2425 | // TGT_CMD to WRITE FIFO |
---|
2426 | ///////////////////////////////////////////////////////////////////// |
---|
2427 | |
---|
2428 | if ( cmd_write_fifo_put ) { |
---|
2429 | if ( cmd_write_fifo_get ) { |
---|
2430 | m_cmd_write_addr_fifo.put_and_get(p_vci_tgt.address.read()); |
---|
2431 | m_cmd_write_eop_fifo.put_and_get(p_vci_tgt.eop.read()); |
---|
2432 | m_cmd_write_srcid_fifo.put_and_get(p_vci_tgt.srcid.read()); |
---|
2433 | m_cmd_write_trdid_fifo.put_and_get(p_vci_tgt.trdid.read()); |
---|
2434 | m_cmd_write_pktid_fifo.put_and_get(p_vci_tgt.pktid.read()); |
---|
2435 | m_cmd_write_data_fifo.put_and_get(p_vci_tgt.wdata.read()); |
---|
2436 | m_cmd_write_be_fifo.put_and_get(p_vci_tgt.be.read()); |
---|
2437 | } else { |
---|
2438 | m_cmd_write_addr_fifo.simple_put(p_vci_tgt.address.read()); |
---|
2439 | m_cmd_write_eop_fifo.simple_put(p_vci_tgt.eop.read()); |
---|
2440 | m_cmd_write_srcid_fifo.simple_put(p_vci_tgt.srcid.read()); |
---|
2441 | m_cmd_write_trdid_fifo.simple_put(p_vci_tgt.trdid.read()); |
---|
2442 | m_cmd_write_pktid_fifo.simple_put(p_vci_tgt.pktid.read()); |
---|
2443 | m_cmd_write_data_fifo.simple_put(p_vci_tgt.wdata.read()); |
---|
2444 | m_cmd_write_be_fifo.simple_put(p_vci_tgt.be.read()); |
---|
2445 | } |
---|
2446 | } else { |
---|
2447 | if ( cmd_write_fifo_get ) { |
---|
2448 | m_cmd_write_addr_fifo.simple_get(); |
---|
2449 | m_cmd_write_eop_fifo.simple_get(); |
---|
2450 | m_cmd_write_srcid_fifo.simple_get(); |
---|
2451 | m_cmd_write_trdid_fifo.simple_get(); |
---|
2452 | m_cmd_write_pktid_fifo.simple_get(); |
---|
2453 | m_cmd_write_data_fifo.simple_get(); |
---|
2454 | m_cmd_write_be_fifo.simple_get(); |
---|
2455 | } |
---|
2456 | } |
---|
2457 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2458 | // TGT_CMD to LLSC FIFO |
---|
2459 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2460 | |
---|
2461 | if ( cmd_llsc_fifo_put ) { |
---|
2462 | if ( cmd_llsc_fifo_get ) { |
---|
2463 | m_cmd_llsc_addr_fifo.put_and_get(p_vci_tgt.address.read()); |
---|
2464 | m_cmd_llsc_sc_fifo.put_and_get(p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND); |
---|
2465 | m_cmd_llsc_srcid_fifo.put_and_get(p_vci_tgt.srcid.read()); |
---|
2466 | m_cmd_llsc_trdid_fifo.put_and_get(p_vci_tgt.trdid.read()); |
---|
2467 | m_cmd_llsc_pktid_fifo.put_and_get(p_vci_tgt.pktid.read()); |
---|
2468 | m_cmd_llsc_wdata_fifo.put_and_get(p_vci_tgt.wdata.read()); |
---|
2469 | } else { |
---|
2470 | m_cmd_llsc_addr_fifo.simple_put(p_vci_tgt.address.read()); |
---|
2471 | m_cmd_llsc_sc_fifo.simple_put(p_vci_tgt.cmd.read() == vci_param::CMD_STORE_COND); |
---|
2472 | m_cmd_llsc_srcid_fifo.simple_put(p_vci_tgt.srcid.read()); |
---|
2473 | m_cmd_llsc_trdid_fifo.simple_put(p_vci_tgt.trdid.read()); |
---|
2474 | m_cmd_llsc_pktid_fifo.simple_put(p_vci_tgt.pktid.read()); |
---|
2475 | m_cmd_llsc_wdata_fifo.simple_put(p_vci_tgt.wdata.read()); |
---|
2476 | } |
---|
2477 | } else { |
---|
2478 | if ( cmd_llsc_fifo_get ) { |
---|
2479 | m_cmd_llsc_addr_fifo.simple_get(); |
---|
2480 | m_cmd_llsc_sc_fifo.simple_get(); |
---|
2481 | m_cmd_llsc_srcid_fifo.simple_get(); |
---|
2482 | m_cmd_llsc_trdid_fifo.simple_get(); |
---|
2483 | m_cmd_llsc_pktid_fifo.simple_get(); |
---|
2484 | m_cmd_llsc_wdata_fifo.simple_get(); |
---|
2485 | } |
---|
2486 | } |
---|
2487 | |
---|
2488 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2489 | // TGT_CMD to CLEANUP FIFO |
---|
2490 | //////////////////////////////////////////////////////////////////////////////////// |
---|
2491 | |
---|
2492 | if ( cmd_cleanup_fifo_put ) { |
---|
2493 | if ( cmd_cleanup_fifo_get ) { |
---|
2494 | m_cmd_cleanup_srcid_fifo.put_and_get(p_vci_tgt.srcid.read()); |
---|
2495 | m_cmd_cleanup_trdid_fifo.put_and_get(p_vci_tgt.trdid.read()); |
---|
2496 | m_cmd_cleanup_pktid_fifo.put_and_get(p_vci_tgt.pktid.read()); |
---|
2497 | m_cmd_cleanup_nline_fifo.put_and_get(p_vci_tgt.wdata.read()); |
---|
2498 | } else { |
---|
2499 | m_cmd_cleanup_srcid_fifo.simple_put(p_vci_tgt.srcid.read()); |
---|
2500 | m_cmd_cleanup_trdid_fifo.simple_put(p_vci_tgt.trdid.read()); |
---|
2501 | m_cmd_cleanup_pktid_fifo.simple_put(p_vci_tgt.pktid.read()); |
---|
2502 | m_cmd_cleanup_nline_fifo.simple_put(p_vci_tgt.wdata.read()); |
---|
2503 | } |
---|
2504 | } else { |
---|
2505 | if ( cmd_cleanup_fifo_get ) { |
---|
2506 | m_cmd_cleanup_srcid_fifo.simple_get(); |
---|
2507 | m_cmd_cleanup_trdid_fifo.simple_get(); |
---|
2508 | m_cmd_cleanup_pktid_fifo.simple_get(); |
---|
2509 | m_cmd_cleanup_nline_fifo.simple_get(); |
---|
2510 | } |
---|
2511 | } |
---|
2512 | |
---|
2513 | m_cpt_cycles++; |
---|
2514 | |
---|
2515 | } // end transition() |
---|
2516 | |
---|
2517 | ///////////////////////////// |
---|
2518 | tmpl(void)::genMoore() |
---|
2519 | ///////////////////////////// |
---|
2520 | { |
---|
2521 | //////////////////////////////////////////////////////////// |
---|
2522 | // Command signals on the p_vci_ixr port |
---|
2523 | //////////////////////////////////////////////////////////// |
---|
2524 | |
---|
2525 | |
---|
2526 | p_vci_ixr.be = 0xF; |
---|
2527 | p_vci_ixr.pktid = 0; |
---|
2528 | p_vci_ixr.srcid = m_srcid_ixr; |
---|
2529 | p_vci_ixr.cons = false; |
---|
2530 | p_vci_ixr.wrap = false; |
---|
2531 | p_vci_ixr.contig = true; |
---|
2532 | p_vci_ixr.clen = 0; |
---|
2533 | p_vci_ixr.cfixed = false; |
---|
2534 | |
---|
2535 | if ( r_xram_cmd_fsm.read() == XRAM_CMD_READ_NLINE ) { |
---|
2536 | p_vci_ixr.cmd = vci_param::CMD_READ; |
---|
2537 | p_vci_ixr.cmdval = true; |
---|
2538 | p_vci_ixr.address = (r_read_to_xram_cmd_nline.read()*m_words*4); |
---|
2539 | p_vci_ixr.plen = m_words*4; |
---|
2540 | p_vci_ixr.wdata = 0x00000000; |
---|
2541 | p_vci_ixr.trdid = r_read_to_xram_cmd_trdid.read(); |
---|
2542 | p_vci_ixr.eop = true; |
---|
2543 | } |
---|
2544 | else if ( r_xram_cmd_fsm.read() == XRAM_CMD_LLSC_NLINE ) { |
---|
2545 | p_vci_ixr.cmd = vci_param::CMD_READ; |
---|
2546 | p_vci_ixr.cmdval = true; |
---|
2547 | p_vci_ixr.address = (r_llsc_to_xram_cmd_nline.read()*m_words*4); |
---|
2548 | p_vci_ixr.plen = m_words*4; |
---|
2549 | p_vci_ixr.wdata = 0x00000000; |
---|
2550 | p_vci_ixr.trdid = r_llsc_to_xram_cmd_trdid.read(); |
---|
2551 | p_vci_ixr.eop = true; |
---|
2552 | } |
---|
2553 | else if ( r_xram_cmd_fsm.read() == XRAM_CMD_WRITE_NLINE ) { |
---|
2554 | p_vci_ixr.cmd = vci_param::CMD_READ; |
---|
2555 | p_vci_ixr.cmdval = true; |
---|
2556 | p_vci_ixr.address = (r_write_to_xram_cmd_nline.read()*m_words*4); |
---|
2557 | p_vci_ixr.plen = m_words*4; |
---|
2558 | p_vci_ixr.wdata = 0x00000000; |
---|
2559 | p_vci_ixr.trdid = r_write_to_xram_cmd_trdid.read(); |
---|
2560 | p_vci_ixr.eop = true; |
---|
2561 | } |
---|
2562 | else if ( r_xram_cmd_fsm.read() == XRAM_CMD_XRAM_DATA ) { |
---|
2563 | p_vci_ixr.cmd = vci_param::CMD_WRITE; |
---|
2564 | p_vci_ixr.cmdval = true; |
---|
2565 | p_vci_ixr.address = ((r_xram_rsp_to_xram_cmd_nline.read()*m_words+r_xram_cmd_cpt.read())*4); |
---|
2566 | p_vci_ixr.plen = m_words*4; |
---|
2567 | p_vci_ixr.wdata = r_xram_rsp_to_xram_cmd_data[r_xram_cmd_cpt.read()].read(); |
---|
2568 | p_vci_ixr.trdid = r_xram_rsp_to_xram_cmd_trdid.read(); |
---|
2569 | p_vci_ixr.eop = (r_xram_cmd_cpt == (m_words-1)); |
---|
2570 | } else { |
---|
2571 | p_vci_ixr.cmdval = false; |
---|
2572 | p_vci_ixr.address = 0; |
---|
2573 | p_vci_ixr.plen = 0; |
---|
2574 | p_vci_ixr.wdata = 0; |
---|
2575 | p_vci_ixr.trdid = 0; |
---|
2576 | p_vci_ixr.eop = false; |
---|
2577 | } |
---|
2578 | |
---|
2579 | //////////////////////////////////////////////////// |
---|
2580 | // Response signals on the p_vci_ixr port |
---|
2581 | //////////////////////////////////////////////////// |
---|
2582 | |
---|
2583 | if ( ((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) && |
---|
2584 | (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) || |
---|
2585 | (r_ixr_rsp_fsm.read() == IXR_RSP_ACK) ) p_vci_ixr.rspack = true; |
---|
2586 | else p_vci_ixr.rspack = false; |
---|
2587 | |
---|
2588 | //////////////////////////////////////////////////// |
---|
2589 | // Command signals on the p_vci_tgt port |
---|
2590 | //////////////////////////////////////////////////// |
---|
2591 | |
---|
2592 | switch ((tgt_cmd_fsm_state_e)r_tgt_cmd_fsm.read()) { |
---|
2593 | case TGT_CMD_IDLE: |
---|
2594 | p_vci_tgt.cmdack = false; |
---|
2595 | break; |
---|
2596 | case TGT_CMD_READ: |
---|
2597 | p_vci_tgt.cmdack = m_cmd_read_addr_fifo.wok(); |
---|
2598 | break; |
---|
2599 | case TGT_CMD_READ_EOP: |
---|
2600 | p_vci_tgt.cmdack = true; |
---|
2601 | break; |
---|
2602 | case TGT_CMD_WRITE: |
---|
2603 | p_vci_tgt.cmdack = m_cmd_write_addr_fifo.wok(); |
---|
2604 | break; |
---|
2605 | case TGT_CMD_ATOMIC: |
---|
2606 | p_vci_tgt.cmdack = m_cmd_llsc_addr_fifo.wok(); |
---|
2607 | break; |
---|
2608 | case TGT_CMD_CLEANUP: |
---|
2609 | p_vci_tgt.cmdack = m_cmd_cleanup_nline_fifo.wok(); |
---|
2610 | break; |
---|
2611 | default: |
---|
2612 | p_vci_tgt.cmdack = false; |
---|
2613 | break; |
---|
2614 | } |
---|
2615 | |
---|
2616 | //////////////////////////////////////////////////// |
---|
2617 | // Response signals on the p_vci_tgt port |
---|
2618 | //////////////////////////////////////////////////// |
---|
2619 | switch ( r_tgt_rsp_fsm.read() ) { |
---|
2620 | |
---|
2621 | case TGT_RSP_READ_IDLE: |
---|
2622 | case TGT_RSP_WRITE_IDLE: |
---|
2623 | case TGT_RSP_LLSC_IDLE: |
---|
2624 | case TGT_RSP_CLEANUP_IDLE: |
---|
2625 | case TGT_RSP_XRAM_IDLE: |
---|
2626 | case TGT_RSP_INIT_IDLE: |
---|
2627 | case TGT_RSP_READ_TEST: |
---|
2628 | case TGT_RSP_XRAM_TEST: |
---|
2629 | |
---|
2630 | p_vci_tgt.rspval = false; |
---|
2631 | p_vci_tgt.rsrcid = 0; |
---|
2632 | p_vci_tgt.rdata = 0; |
---|
2633 | p_vci_tgt.rpktid = 0; |
---|
2634 | p_vci_tgt.rtrdid = 0; |
---|
2635 | p_vci_tgt.rerror = 0; |
---|
2636 | p_vci_tgt.reop = false; |
---|
2637 | break; |
---|
2638 | case TGT_RSP_READ_LINE: |
---|
2639 | p_vci_tgt.rspval = true; |
---|
2640 | p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); |
---|
2641 | p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); |
---|
2642 | p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); |
---|
2643 | p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); |
---|
2644 | p_vci_tgt.rerror = 0; |
---|
2645 | p_vci_tgt.reop = (r_tgt_rsp_cpt.read()==(m_words-1)); |
---|
2646 | break; |
---|
2647 | case TGT_RSP_READ_WORD: |
---|
2648 | p_vci_tgt.rspval = true; |
---|
2649 | p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); |
---|
2650 | p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); |
---|
2651 | p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); |
---|
2652 | p_vci_tgt.rpktid = r_read_to_tgt_rsp_pktid.read(); |
---|
2653 | p_vci_tgt.rerror = 0; |
---|
2654 | p_vci_tgt.reop = true; |
---|
2655 | break; |
---|
2656 | case TGT_RSP_WRITE: |
---|
2657 | p_vci_tgt.rspval = true; |
---|
2658 | p_vci_tgt.rdata = 0; |
---|
2659 | p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); |
---|
2660 | p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); |
---|
2661 | p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); |
---|
2662 | p_vci_tgt.rerror = 0; |
---|
2663 | p_vci_tgt.reop = true; |
---|
2664 | break; |
---|
2665 | case TGT_RSP_CLEANUP: |
---|
2666 | p_vci_tgt.rspval = true; |
---|
2667 | p_vci_tgt.rdata = 0; |
---|
2668 | p_vci_tgt.rsrcid = r_cleanup_to_tgt_rsp_srcid.read(); |
---|
2669 | p_vci_tgt.rtrdid = r_cleanup_to_tgt_rsp_trdid.read(); |
---|
2670 | p_vci_tgt.rpktid = r_cleanup_to_tgt_rsp_pktid.read(); |
---|
2671 | p_vci_tgt.rerror = 0; |
---|
2672 | p_vci_tgt.reop = true; |
---|
2673 | break; |
---|
2674 | case TGT_RSP_LLSC: |
---|
2675 | p_vci_tgt.rspval = true; |
---|
2676 | p_vci_tgt.rdata = r_llsc_to_tgt_rsp_data.read(); |
---|
2677 | p_vci_tgt.rsrcid = r_llsc_to_tgt_rsp_srcid.read(); |
---|
2678 | p_vci_tgt.rtrdid = r_llsc_to_tgt_rsp_trdid.read(); |
---|
2679 | p_vci_tgt.rpktid = r_llsc_to_tgt_rsp_pktid.read(); |
---|
2680 | p_vci_tgt.rerror = 0; |
---|
2681 | p_vci_tgt.reop = true; |
---|
2682 | break; |
---|
2683 | case TGT_RSP_XRAM_LINE: |
---|
2684 | p_vci_tgt.rspval = true; |
---|
2685 | p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); |
---|
2686 | p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); |
---|
2687 | p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); |
---|
2688 | p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); |
---|
2689 | p_vci_tgt.rerror = 0; |
---|
2690 | p_vci_tgt.reop = (r_tgt_rsp_cpt.read()==(m_words-1)); |
---|
2691 | break; |
---|
2692 | case TGT_RSP_XRAM_WORD: |
---|
2693 | p_vci_tgt.rspval = true; |
---|
2694 | p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); |
---|
2695 | p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); |
---|
2696 | p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); |
---|
2697 | p_vci_tgt.rpktid = r_xram_rsp_to_tgt_rsp_pktid.read(); |
---|
2698 | p_vci_tgt.rerror = 0; |
---|
2699 | p_vci_tgt.reop = true; |
---|
2700 | break; |
---|
2701 | case TGT_RSP_INIT: |
---|
2702 | p_vci_tgt.rspval = true; |
---|
2703 | p_vci_tgt.rdata = 0; |
---|
2704 | p_vci_tgt.rsrcid = r_init_rsp_to_tgt_rsp_srcid.read(); |
---|
2705 | p_vci_tgt.rtrdid = r_init_rsp_to_tgt_rsp_trdid.read(); |
---|
2706 | p_vci_tgt.rpktid = r_init_rsp_to_tgt_rsp_pktid.read(); |
---|
2707 | p_vci_tgt.rerror = 0; |
---|
2708 | p_vci_tgt.reop = true; |
---|
2709 | break; |
---|
2710 | } // end switch r_tgt_rsp_fsm |
---|
2711 | |
---|
2712 | /////////////////////////////////////////////////// |
---|
2713 | // Command signals on the p_vci_ini port |
---|
2714 | /////////////////////////////////////////////////// |
---|
2715 | |
---|
2716 | p_vci_ini.cmd = vci_param::CMD_WRITE; |
---|
2717 | p_vci_ini.srcid = m_srcid_ini; |
---|
2718 | p_vci_ini.pktid = 0; |
---|
2719 | p_vci_ini.cons = true; |
---|
2720 | p_vci_ini.wrap = false; |
---|
2721 | p_vci_ini.contig = false; |
---|
2722 | p_vci_ini.clen = 0; |
---|
2723 | p_vci_ini.cfixed = false; |
---|
2724 | |
---|
2725 | switch ( r_init_cmd_fsm.read() ) { |
---|
2726 | |
---|
2727 | case INIT_CMD_UPDT_IDLE: |
---|
2728 | case INIT_CMD_INVAL_IDLE: |
---|
2729 | case INIT_CMD_UPDT_SEL: |
---|
2730 | case INIT_CMD_INVAL_SEL: |
---|
2731 | p_vci_ini.cmdval = false; |
---|
2732 | p_vci_ini.address = 0; |
---|
2733 | p_vci_ini.wdata = 0; |
---|
2734 | p_vci_ini.be = 0; |
---|
2735 | p_vci_ini.plen = 0; |
---|
2736 | p_vci_ini.trdid = 0; |
---|
2737 | p_vci_ini.eop = false; |
---|
2738 | break; |
---|
2739 | case INIT_CMD_INVAL_NLINE: |
---|
2740 | p_vci_ini.cmdval = true; |
---|
2741 | p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()]; |
---|
2742 | p_vci_ini.wdata = r_xram_rsp_to_init_cmd_nline.read(); |
---|
2743 | p_vci_ini.be = 0xF; |
---|
2744 | p_vci_ini.plen = 4; |
---|
2745 | p_vci_ini.trdid = r_xram_rsp_to_init_cmd_trdid.read(); |
---|
2746 | p_vci_ini.eop = true; |
---|
2747 | break; |
---|
2748 | case INIT_CMD_UPDT_NLINE: |
---|
2749 | p_vci_ini.cmdval = true; |
---|
2750 | p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()] + 4; |
---|
2751 | p_vci_ini.wdata = r_write_to_init_cmd_nline.read(); |
---|
2752 | p_vci_ini.be = 0xF; |
---|
2753 | p_vci_ini.plen = 4 * (r_write_to_init_cmd_count.read() + 2); |
---|
2754 | p_vci_ini.trdid = r_write_to_init_cmd_trdid.read(); |
---|
2755 | p_vci_ini.eop = false; |
---|
2756 | break; |
---|
2757 | case INIT_CMD_UPDT_INDEX: |
---|
2758 | p_vci_ini.cmdval = true; |
---|
2759 | p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()] + 4; |
---|
2760 | p_vci_ini.wdata = r_write_to_init_cmd_index.read(); |
---|
2761 | p_vci_ini.be = 0xF; |
---|
2762 | p_vci_ini.plen = 4 * (r_write_to_init_cmd_count.read() + 2); |
---|
2763 | p_vci_ini.trdid = r_write_to_init_cmd_trdid.read(); |
---|
2764 | p_vci_ini.eop = false; |
---|
2765 | break; |
---|
2766 | case INIT_CMD_UPDT_DATA: |
---|
2767 | p_vci_ini.cmdval = true; |
---|
2768 | p_vci_ini.address = m_coherence_table[r_init_cmd_target.read()] + 4; |
---|
2769 | p_vci_ini.wdata = r_write_to_init_cmd_data[r_init_cmd_cpt.read() + |
---|
2770 | r_write_to_init_cmd_index.read()].read(); |
---|
2771 | if(r_write_to_init_cmd_we[r_init_cmd_cpt.read() + |
---|
2772 | r_write_to_init_cmd_index.read()].read()) |
---|
2773 | p_vci_ini.be = 0xF; |
---|
2774 | else p_vci_ini.be = 0x0; |
---|
2775 | p_vci_ini.plen = 4 * (r_write_to_init_cmd_count.read() + 2); |
---|
2776 | p_vci_ini.trdid = r_write_to_init_cmd_trdid.read(); |
---|
2777 | p_vci_ini.eop = ( r_init_cmd_cpt.read() == (r_write_to_init_cmd_count.read()-1) ); |
---|
2778 | break; |
---|
2779 | } // end switch r_init_cmd_fsm |
---|
2780 | |
---|
2781 | ////////////////////////////////////////////////////// |
---|
2782 | // Response signals on the p_vci_ini port |
---|
2783 | ////////////////////////////////////////////////////// |
---|
2784 | |
---|
2785 | if ( r_init_rsp_fsm.read() == INIT_RSP_IDLE ) p_vci_ini.rspack = true; |
---|
2786 | else p_vci_ini.rspack = false; |
---|
2787 | |
---|
2788 | } // end genMoore() |
---|
2789 | |
---|
2790 | }} // end name space |
---|