Changeset 291 for trunk/modules/vci_mem_cache_v4
- Timestamp:
- Jan 28, 2013, 1:59:32 PM (12 years ago)
- Location:
- trunk/modules/vci_mem_cache_v4/caba
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/modules/vci_mem_cache_v4/caba/metadata/vci_mem_cache_v4.sd
r273 r291 24 24 Uses('common:mapping_table'), 25 25 Uses('caba:generic_fifo'), 26 Uses('caba:generic_llsc_global_table'), 26 27 ], 27 28 -
trunk/modules/vci_mem_cache_v4/caba/source/include/vci_mem_cache_v4.h
r289 r291 54 54 #include "mapping_table.h" 55 55 #include "int_tab.h" 56 #include "generic_llsc_global_table.h" 56 57 #include "mem_cache_directory_v4.h" 57 58 #include "xram_transaction_v4.h" … … 422 423 CacheData m_cache_data; // data array[set][way][word] 423 424 HeapDirectory m_heap; // heap for copies 425 GenericLLSCGlobalTable 426 < 427 32 , // desired number of slots 428 4096, // number of processors in the system 429 8000, // registratioçn life span (in # of LL operations) 430 typename vci_param::fast_addr_t // address type 431 > 432 m_llsc_table; // ll/sc global registration table 424 433 425 434 // adress masks … … 491 500 sc_signal<size_t> r_read_next_ptr; // Next entry to point to 492 501 sc_signal<bool> r_read_last_free; // Last free entry 502 sc_signal<typename vci_param::fast_addr_t> 503 r_read_ll_key; // LL key returned by the llsc_global_table 493 504 494 505 // Buffer between READ fsm and IXR_CMD fsm (ask a missing cache line to XRAM) … … 505 516 sc_signal<size_t> r_read_to_tgt_rsp_word; // first word of the response 506 517 sc_signal<size_t> r_read_to_tgt_rsp_length; // length of the response 518 sc_signal<typename vci_param::fast_addr_t> 519 r_read_to_tgt_rsp_ll_key; // LL key returned by the llsc_global_table 507 520 508 521 /////////////////////////////////////////////////////////////// … … 533 546 sc_signal<size_t> r_write_trt_index; // index in Transaction Table 534 547 sc_signal<size_t> r_write_upt_index; // index in Update Table 548 sc_signal<bool> r_write_sc_fail; // sc command failed 549 sc_signal<bool> r_write_pending_sc; // sc command pending in WRITE fsm 535 550 536 551 // Buffer between WRITE fsm and TGT_RSP fsm (acknowledge a write command from L1) 537 sc_signal<bool> r_write_to_tgt_rsp_req; // valid request 538 sc_signal<size_t> r_write_to_tgt_rsp_srcid; // transaction srcid 539 sc_signal<size_t> r_write_to_tgt_rsp_trdid; // transaction trdid 540 sc_signal<size_t> r_write_to_tgt_rsp_pktid; // transaction pktid 552 sc_signal<bool> 
r_write_to_tgt_rsp_req; // valid request 553 sc_signal<size_t> r_write_to_tgt_rsp_srcid; // transaction srcid 554 sc_signal<size_t> r_write_to_tgt_rsp_trdid; // transaction trdid 555 sc_signal<size_t> r_write_to_tgt_rsp_pktid; // transaction pktid 556 sc_signal<bool> r_write_to_tgt_rsp_sc_fail; // sc command failed 541 557 542 558 // Buffer between WRITE fsm and IXR_CMD fsm (ask a missing cache line to XRAM) … … 721 737 sc_signal<size_t> r_xram_rsp_to_tgt_rsp_length; // length of the response 722 738 sc_signal<bool> r_xram_rsp_to_tgt_rsp_rerror; // send error to requester 739 sc_signal<typename vci_param::fast_addr_t> 740 r_xram_rsp_to_tgt_rsp_ll_key; // LL key returned by the llsc_global_table 723 741 724 742 // Buffer between XRAM_RSP fsm and INIT_CMD fsm (Inval L1 Caches) -
trunk/modules/vci_mem_cache_v4/caba/source/include/xram_transaction_v4.h
r253 r291 32 32 std::vector<be_t> wdata_be; // be for each data in the write buffer 33 33 bool rerror; // error returned by xram 34 data_t ll_key; // LL key returned by the llsc_global_table 34 35 35 36 ///////////////////////////////////////////////////////////////////// … … 127 128 wdata.assign(source.wdata.begin(),source.wdata.end()); 128 129 rerror = source.rerror; 130 ll_key = source.ll_key; 129 131 } 130 132 … … 325 327 // - data : the data to write (in case of write) 326 328 // - data_be : the mask of the data to write (in case of write) 329 // - ll_key : the ll key (if any) returned by the llsc_global_table 327 330 ///////////////////////////////////////////////////////////////////// 328 331 void set(const size_t index, … … 336 339 const size_t word_index, 337 340 const std::vector<be_t> &data_be, 338 const std::vector<data_t> &data) 341 const std::vector<data_t> &data, 342 const data_t ll_key = 0) 339 343 { 340 344 assert( (index < size_tab) … … 354 358 tab[index].read_length = read_length; 355 359 tab[index].word_index = word_index; 360 tab[index].ll_key = ll_key; 356 361 for(size_t i=0; i<tab[index].wdata.size(); i++) 357 362 { -
trunk/modules/vci_mem_cache_v4/caba/source/src/vci_mem_cache_v4.cpp
r290 r291 293 293 m_cache_data( nways, nsets, nwords ), 294 294 m_heap( m_heap_size ), 295 m_llsc_table(), 295 296 296 297 #define L2 soclib::common::uint32_log2 … … 511 512 << " | " << ixr_rsp_fsm_str[r_ixr_rsp_fsm] 512 513 << " | " << xram_rsp_fsm_str[r_xram_rsp_fsm] << std::endl; 514 515 //m_llsc_table.print_trace(); 516 513 517 } 514 518 … … 860 864 assert(((p_vci_tgt.pktid.read() & 0x7) == 0x6) && 861 865 "The type specified in the pktid field is incompatible with the LL CMD"); 862 assert(false && "TODO : LL not implemented"); //TODO 863 //r_tgt_cmd_fsm = TGT_CMD_READ; 866 r_tgt_cmd_fsm = TGT_CMD_READ; 864 867 } 865 868 else if ( p_vci_tgt.cmd.read() == vci_param::CMD_NOP ) … … 874 877 "The type specified in the pktid field is incompatible with the NOP CMD"); 875 878 876 if( p_vci_tgt.pktid.read() == TYPE_CAS)879 if((p_vci_tgt.pktid.read() & 0x7) == TYPE_CAS) 877 880 r_tgt_cmd_fsm = TGT_CMD_CAS; 878 881 else // TYPE_SC 879 assert(false && "TODO : SC not implemented"); //TODO 880 //r_tgt_cmd_fsm = TGT_CMD_WRITE; 882 r_tgt_cmd_fsm = TGT_CMD_WRITE; 881 883 } 882 884 else … … 892 894 ////////////////// 893 895 case TGT_CMD_READ: 894 if ((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) 896 // This test checks that the read does not cross a cache line limit. 897 // It must not be taken into account when dealing with an LL CMD. 
898 if (((m_x[(vci_addr_t)p_vci_tgt.address.read()]+(p_vci_tgt.plen.read()>>2)) > 16) && ( p_vci_tgt.cmd.read() != vci_param::CMD_LOCKED_READ )) 895 899 { 896 900 std::cout … … 907 911 << std::endl; 908 912 std::cout 909 << " read command packets must contain one single flit"913 << " read or ll command packets must contain one single flit" 910 914 << std::endl; 911 915 exit(0); … … 927 931 #endif 928 932 cmd_read_fifo_put = true; 929 m_cpt_read++; 933 if ( p_vci_tgt.cmd.read() == vci_param::CMD_LOCKED_READ ) 934 m_cpt_ll++; 935 else 936 m_cpt_read++; 930 937 r_tgt_cmd_fsm = TGT_CMD_IDLE; 931 938 } … … 1139 1146 // READ FSM 1140 1147 //////////////////////////////////////////////////////////////////////////////////// 1141 // The READ FSM controls the VCI read requests.1148 // The READ FSM controls the VCI read and ll requests. 1142 1149 // It takes the lock protecting the cache directory to check the cache line status: 1143 1150 // - In case of HIT … … 1174 1181 << " srcid = " << std::dec << m_cmd_read_srcid_fifo.read() 1175 1182 << " / address = " << std::hex << m_cmd_read_addr_fifo.read() 1183 << " / pktid = " << std::hex << m_cmd_read_pktid_fifo.read() 1176 1184 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1177 1185 } … … 1211 1219 DirectoryEntry entry = 1212 1220 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 1213 1221 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) // access the global table ONLY when we have an LL cmd 1222 { 1223 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 1224 } 1214 1225 r_read_is_cnt = entry.is_cnt; 1215 1226 r_read_dirty = entry.dirty; … … 1256 1267 << " / count = " <<std::dec << entry.count 1257 1268 << " / is_cnt = " << entry.is_cnt << std::endl; 1269 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 1270 { 1271 std::cout 1272 << " <MEMC " << name() << ".READ_DIR_LOCK> global_llsc_table LL access" << std::endl; 1273 } 1258 1274 } 1259 1275 #endif … … 1604 1620 
r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 1605 1621 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 1606 cmd_read_fifo_get = true; 1607 r_read_to_tgt_rsp_req = true; 1608 r_read_fsm = READ_IDLE; 1622 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 1623 cmd_read_fifo_get = true; 1624 r_read_to_tgt_rsp_req = true; 1625 r_read_fsm = READ_IDLE; 1609 1626 1610 1627 #if DEBUG_MEMC_READ … … 1673 1690 m_x[(vci_addr_t)(m_cmd_read_addr_fifo.read())], 1674 1691 std::vector<be_t>(m_words,0), 1675 std::vector<data_t>(m_words,0)); 1692 std::vector<data_t>(m_words,0), 1693 r_read_ll_key.read()); 1676 1694 #if DEBUG_MEMC_READ 1677 1695 if( m_debug_read_fsm ) … … 1718 1736 // WRITE FSM 1719 1737 /////////////////////////////////////////////////////////////////////////////////// 1720 // The WRITE FSM handles the write bursts sent by the processors.1738 // The WRITE FSM handles the write bursts and sc requests sent by the processors. 1721 1739 // All addresses in a burst must be in the same cache line. 1722 1740 // A complete write burst is consumed in the FIFO & copied to a local buffer. … … 1728 1746 // returned to the writing processor. 1729 1747 // If the data is cached by other processors, a coherence transaction must 1730 // be launched :1748 // be launched (sc requests always require a coherence transaction): 1731 1749 // It is a multicast update if the line is not in counter mode, and the processor 1732 1750 // takes the lock protecting the Update Table (UPT) to register this transaction. 
… … 1753 1771 if ( m_cmd_write_addr_fifo.rok() ) 1754 1772 { 1755 m_cpt_write++; 1756 m_cpt_write_cells++; 1773 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 1774 m_cpt_sc++; 1775 else 1776 { 1777 m_cpt_write++; 1778 m_cpt_write_cells++; 1779 } 1757 1780 1758 1781 // consume a word in the FIFO & write it in the local buffer 1759 1782 cmd_write_fifo_get = true; 1783 r_write_pending_sc = false; 1760 1784 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1761 1785 … … 1775 1799 } 1776 1800 1777 if( m_cmd_write_eop_fifo.read() )1801 if( m_cmd_write_eop_fifo.read() || ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) ) 1778 1802 { 1779 1803 r_write_fsm = WRITE_DIR_REQ; … … 1824 1848 // consume a word in the FIFO & write it in the local buffer 1825 1849 cmd_write_fifo_get = true; 1850 r_write_pending_sc = false; 1826 1851 size_t index = r_write_word_index.read() + r_write_word_count.read(); 1827 1852 … … 1844 1869 if ( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 1845 1870 { 1871 if(((r_write_pktid.read() & 0x7) == TYPE_SC) && not r_write_pending_sc.read()) // check for an SC command (and check that its second flit is not already consumed) 1872 { 1873 if ( m_cmd_write_addr_fifo.rok() ) 1874 { 1875 size_t index = m_x[(vci_addr_t)(r_write_address.read())]; 1876 bool sc_success = m_llsc_table.sc(r_write_address.read(),r_write_data[index].read()); 1877 r_write_sc_fail = !sc_success; 1878 1879 assert(m_cmd_write_eop_fifo.read() && "Error in VCI_MEM_CACHE : invalid packet format for SC command"); 1880 // consume a word in the FIFO & write it in the local buffer 1881 cmd_write_fifo_get = true; 1882 r_write_pending_sc = true; 1883 index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 1884 1885 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 1886 r_write_word_index = index; 1887 r_write_word_count = 1; 1888 r_write_data[index] = m_cmd_write_data_fifo.read(); 1889 if (!sc_success) 1890 { 1891 r_write_fsm = WRITE_RSP; 1892 break; 1893 } 1894 } 1895 
else break; 1896 } 1897 //else it is a TYPE_WRITE, need a simple sw access to the 1898 // llsc_global_table 1899 else 1900 { 1901 m_llsc_table.sw(r_write_address.read()); 1902 } 1846 1903 r_write_fsm = WRITE_DIR_LOCK; 1847 1904 } … … 1905 1962 << " count = " << entry.count 1906 1963 << " is_cnt = " << entry.is_cnt << std::endl; 1964 if((r_write_pktid.read() & 0x7) == TYPE_SC) 1965 std::cout << " <MEMC " << name() << ".WRITE_DIR_LOCK> global_llsc_table SC access" << std::endl; 1966 else 1967 std::cout << " <MEMC " << name() << ".WRITE_DIR_LOCK> global_llsc_table SW access" << std::endl; 1907 1968 } 1908 1969 #endif … … 1985 2046 1986 2047 // no_update is true when there is no need for coherence transaction 1987 bool no_update = (r_write_count.read()==0) || ( owner && (r_write_count.read()==1)); 2048 // (tests for sc requests) 2049 bool no_update = ((r_write_count.read()==0) || ( owner && (r_write_count.read()==1) && (r_write_pktid.read() != TYPE_SC))); 1988 2050 1989 2051 // write data in the cache if no coherence transaction … … 2004 2066 } 2005 2067 2006 if ( owner and not no_update )2068 if ( owner and not no_update and (r_write_pktid.read() != TYPE_SC)) 2007 2069 { 2008 2070 r_write_count = r_write_count.read() - 1; … … 2127 2189 case WRITE_UPT_REQ: 2128 2190 { 2129 // prepare the coherence ransaction for the INIT_CMD FSM2191 // prepare the coherence transaction for the INIT_CMD FSM 2130 2192 // and write the first copy in the FIFO 2131 2193 // send the request if only one copy … … 2146 2208 for (size_t i=min ; i<max ; i++) r_write_to_init_cmd_data[i] = r_write_data[i]; 2147 2209 2148 if( (r_write_copy.read() != r_write_srcid.read()) or 2210 if( (r_write_copy.read() != r_write_srcid.read()) or (r_write_pktid.read() == TYPE_SC) or 2149 2211 #if L1_MULTI_CACHE 2150 2212 (r_write_copy_cache.read() != r_write_pktid.read()) or … … 2159 2221 write_to_init_cmd_fifo_cache_id= r_write_copy_cache.read(); 2160 2222 #endif 2161 if(r_write_count.read() == 1 )2223 
if(r_write_count.read() == 1 || ((r_write_count.read() == 0) && (r_write_pktid.read() == TYPE_SC)) ) 2162 2224 { 2163 2225 r_write_fsm = WRITE_IDLE; … … 2207 2269 bool dec_upt_counter; 2208 2270 2209 if( (entry.owner.srcid != r_write_srcid.read()) or2271 if(((entry.owner.srcid != r_write_srcid.read()) || (r_write_pktid.read() == TYPE_SC)) or 2210 2272 #if L1_MULTI_CACHE 2211 2273 (entry.owner.cache_id != r_write_pktid.read()) or 2212 2274 #endif 2213 entry.owner.inst) // put te next srcid in the fifo2275 entry.owner.inst) // put the next srcid in the fifo 2214 2276 { 2215 2277 dec_upt_counter = false; … … 2299 2361 { 2300 2362 // post the request to TGT_RSP_FSM 2301 r_write_to_tgt_rsp_req = true; 2302 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 2303 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 2304 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 2363 r_write_to_tgt_rsp_req = true; 2364 r_write_to_tgt_rsp_srcid = r_write_srcid.read(); 2365 r_write_to_tgt_rsp_trdid = r_write_trdid.read(); 2366 r_write_to_tgt_rsp_pktid = r_write_pktid.read(); 2367 r_write_to_tgt_rsp_sc_fail = r_write_sc_fail.read(); 2305 2368 2306 2369 // try to get a new write request from the FIFO 2307 2370 if ( m_cmd_write_addr_fifo.rok() ) 2308 2371 { 2309 m_cpt_write++; 2310 m_cpt_write_cells++; 2372 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2373 m_cpt_sc++; 2374 else 2375 { 2376 m_cpt_write++; 2377 m_cpt_write_cells++; 2378 } 2311 2379 2312 2380 // consume a word in the FIFO & write it in the local buffer 2313 2381 cmd_write_fifo_get = true; 2382 r_write_pending_sc = false; 2314 2383 size_t index = m_x[(vci_addr_t)(m_cmd_write_addr_fifo.read())]; 2315 2384 … … 2329 2398 } 2330 2399 2331 if( m_cmd_write_eop_fifo.read() )2400 if( m_cmd_write_eop_fifo.read() || ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) ) 2332 2401 { 2333 2402 r_write_fsm = WRITE_DIR_REQ; … … 3191 3260 entry.lock = false; 3192 3261 entry.dirty = dirty; 3193 entry.tag = r_xram_rsp_trt_buf.nline / 
m_sets;3262 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 3194 3263 entry.ptr = 0; 3195 3264 if(cached_read) … … 3305 3374 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 3306 3375 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 3376 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 3307 3377 r_xram_rsp_to_tgt_rsp_rerror = false; 3308 3378 r_xram_rsp_to_tgt_rsp_req = true; … … 4371 4441 ////////////////////// 4372 4442 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 4373 // write data in cache if no CC request 4374 { 4443 // write data in cache if no CC request 4444 { 4445 // The CAS is a success => sw access to the llsc_global_table 4446 m_llsc_table.sw(m_cmd_cas_addr_fifo.read()); 4447 4375 4448 // test coherence request 4376 4449 if(r_cas_count.read()) // replicated line … … 4422 4495 << " / value = " << r_cas_wdata.read() 4423 4496 << " / count = " << r_cas_count.read() << std::endl; 4497 std::cout << " <MEMC " << name() << ".CAS_DIR_HIT_WRITE> global_llsc_table SW access" << std::endl; 4424 4498 } 4425 4499 #endif … … 6383 6457 case TGT_RSP_READ: 6384 6458 p_vci_tgt.rspval = true; 6385 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 6459 if( ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL) 6460 && (r_tgt_rsp_cpt.read() == (r_read_to_tgt_rsp_word.read()+r_read_to_tgt_rsp_length-1)) ) 6461 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()-1].read(); 6462 else if ((r_read_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL) 6463 p_vci_tgt.rdata = r_read_to_tgt_rsp_ll_key.read(); 6464 else 6465 p_vci_tgt.rdata = r_read_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 6386 6466 p_vci_tgt.rsrcid = r_read_to_tgt_rsp_srcid.read(); 6387 6467 p_vci_tgt.rtrdid = r_read_to_tgt_rsp_trdid.read(); … … 6391 6471 break; 6392 6472 case TGT_RSP_WRITE: 6473 /*if( ((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) ) 6474 { 6475 std::cout << "SC RSP / rsrcid = " << r_write_to_tgt_rsp_srcid.read() << " / 
rdata = " << r_write_to_tgt_rsp_sc_fail.read() << std::endl; 6476 }*/ 6393 6477 p_vci_tgt.rspval = true; 6394 p_vci_tgt.rdata = 0; 6478 if( ((r_write_to_tgt_rsp_pktid.read() & 0x7) == TYPE_SC) && r_write_to_tgt_rsp_sc_fail.read() ) 6479 p_vci_tgt.rdata = 1; 6480 else 6481 p_vci_tgt.rdata = 0; 6395 6482 p_vci_tgt.rsrcid = r_write_to_tgt_rsp_srcid.read(); 6396 6483 p_vci_tgt.rtrdid = r_write_to_tgt_rsp_trdid.read(); 6397 6484 p_vci_tgt.rpktid = r_write_to_tgt_rsp_pktid.read(); 6398 p_vci_tgt.rerror = 0x2 & ( (1 << vci_param::E) - 1); 6485 //p_vci_tgt.rerror = 0x2 & ( (1 << vci_param::E) - 1); 6486 p_vci_tgt.rerror = 0; 6399 6487 p_vci_tgt.reop = true; 6400 6488 break; … … 6419 6507 case TGT_RSP_XRAM: 6420 6508 p_vci_tgt.rspval = true; 6421 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 6509 if( ((r_xram_rsp_to_tgt_rsp_pktid.read() & 0x7) == TYPE_LL) 6510 && (r_tgt_rsp_cpt.read() == (r_xram_rsp_to_tgt_rsp_word.read()+r_xram_rsp_to_tgt_rsp_length-1)) ) 6511 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_ll_key.read(); 6512 else 6513 p_vci_tgt.rdata = r_xram_rsp_to_tgt_rsp_data[r_tgt_rsp_cpt.read()].read(); 6422 6514 p_vci_tgt.rsrcid = r_xram_rsp_to_tgt_rsp_srcid.read(); 6423 6515 p_vci_tgt.rtrdid = r_xram_rsp_to_tgt_rsp_trdid.read(); … … 6430 6522 case TGT_RSP_INIT: 6431 6523 p_vci_tgt.rspval = true; 6432 p_vci_tgt.rdata = 0; 6524 p_vci_tgt.rdata = 0; // Can be a CAS or SC rsp 6433 6525 p_vci_tgt.rsrcid = r_init_rsp_to_tgt_rsp_srcid.read(); 6434 6526 p_vci_tgt.rtrdid = r_init_rsp_to_tgt_rsp_trdid.read(); 6435 6527 p_vci_tgt.rpktid = r_init_rsp_to_tgt_rsp_pktid.read(); 6436 p_vci_tgt.rerror = 0; // Can be a CAS rsp6528 p_vci_tgt.rerror = 0; 6437 6529 p_vci_tgt.reop = true; 6438 6530 break;
Note: See TracChangeset for help on using the changeset viewer.