Changeset 489 for trunk/modules/vci_mem_cache
- Timestamp: Aug 9, 2013, 11:00:05 AM
- Location: trunk/modules/vci_mem_cache
- Files: 5 edited
trunk/modules/vci_mem_cache/caba/source/include/mem_cache_directory.h
(diff r449 → r489)

- The L1_MULTI_CACHE mechanism is removed: it no longer works with the new TSAR pktid encoding. This deletes the #define, the cache_id field of the Owner structure, and every #if L1_MULTI_CACHE block in the Owner and HeapEntry constructors, in copy() and in the print() functions.
- The Owner constructor now takes only two arguments (bool inst, size_t srcid); the Owner, DirectoryEntry and HeapEntry constructors are reformatted.
- CacheDirectory::select() is rewritten with explicit comments describing the victim-selection priority: an empty (invalid) way first, then a way that is neither recently used nor locked, then a not recently used but locked way, then a recently used but not locked way, and finally way 0 when all ways are both locked and recently used. The RANDOM_EVICTION variant and CacheDirectory::write() are only reindented.
- The CacheData class is reformatted (separator comments before each method, aligned argument lists for read(), read_line() and write()), and the assertion message for an illegal byte-enable in write() is corrected from "wrong word cell" to "wrong be". The byte-enable masked write itself is unchanged.
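For reference, a minimal stand-alone sketch of the byte-enable merge performed by CacheData::write(); the function name and the old_word parameter are illustrative, but the mask construction follows the source:

    #include <cstdint>

    // Byte-enable merge as done by CacheData::write(): each bit of 'be'
    // selects one byte of 'data' to overwrite in the stored word.
    uint32_t be_merge(uint32_t old_word, uint32_t data, uint32_t be)
    {
        if (be == 0x0) return old_word;   // no byte selected: keep the stored word
        if (be == 0xF) return data;       // full word write

        uint32_t mask = 0;
        if (be & 0x1) mask |= 0x000000FF;
        if (be & 0x2) mask |= 0x0000FF00;
        if (be & 0x4) mask |= 0x00FF0000;
        if (be & 0x8) mask |= 0xFF000000;

        return (data & mask) | (old_word & ~mask);
    }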
trunk/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
(diff r483 → r489)

- Maintainers list updated (alain.greiner@lip6.fr added).
- MULTI_ACK FSM: the MULTI_ACK_CONFIG_ACK state is removed.
- CONFIG FSM: CONFIG_DIR_IVT_LOCK becomes CONFIG_IVT_LOCK, CONFIG_INV_SEND becomes CONFIG_INVAL_SEND, CONFIG_BC_WAIT and CONFIG_INV_WAIT are replaced by a single CONFIG_WAIT state, and three new states (CONFIG_TRT_LOCK, CONFIG_TRT_SET, CONFIG_PUT_REQ) support the SYNC command.
- WRITE FSM: WRITE_DIR_READ is removed and WRITE_BC_DIR_READ is added.
- IXR_RSP FSM: the IXR_RSP_ACK state is removed.
- XRAM_RSP FSM: XRAM_RSP_INVAL_LOCK is renamed XRAM_RSP_IVT_LOCK.
- IXR_CMD FSM: the four generic states (IXR_CMD_READ / WRITE / CAS / XRAM) are replaced by per-client TRT-access and send states (IXR_CMD_*_TRT and IXR_CMD_*_SEND), and a CONFIG client is added (IXR_CMD_CONFIG_IDLE / TRT / SEND).
- CLEANUP FSM: the CLEANUP_CONFIG_ACK state is removed.
- ALLOC_TRT FSM: two new clients, ALLOC_TRT_CONFIG and ALLOC_TRT_IXR_CMD.
- The cmd_config_type_e enum (CMD_CONFIG_INVAL / CMD_CONFIG_SYNC) is removed.
- The debug data buffers become plain data_t arrays, and the address-monitoring members (m_monitor_ok, m_monitor_base, m_monitor_length) are removed.
- TGT_CMD FSM: new registers r_tgt_cmd_srcid / r_tgt_cmd_trdid / r_tgt_cmd_pktid record the identity of the configuration requester.
- CONFIG FSM registers: r_config_nlines is split into r_config_cmd_lines (commands still to be sent) and r_config_rsp_lines (responses not yet received); new registers r_config_dir_lock, r_config_trt_index and r_config_ivt_index; r_config_dir_next_ptr is renamed r_config_dir_ptr; a new buffer to the IXR_CMD FSM (r_config_to_ixr_cmd_req / r_config_to_ixr_cmd_index) is added.
- The buffers from the READ, WRITE, CAS, XRAM_RSP and CONFIG FSMs to the IXR_CMD FSM are simplified: they now carry only a valid bit, a GET/PUT flag where relevant, and a TRT index, instead of the cache line index and data (see the sketch after this list).
- IXR_RSP FSM: a new r_ixr_rsp_to_config_ack flip-flop signals a PUT completion to the CONFIG FSM.
- IXR_CMD FSM: the r_ixr_cmd_cpt counter is replaced by r_ixr_cmd_word, r_ixr_cmd_trdid, r_ixr_cmd_address, r_ixr_cmd_wdata and r_ixr_cmd_get.
- All the L1_MULTI_CACHE fifos, the r_cleanup_pktid register, and the r_multi_ack_to_config_ack / r_cleanup_to_config_ack flip-flops are removed.
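A minimal sketch of the simplified client-to-IXR_CMD handshake implied by these declarations; the structure name is illustrative, since in the actual component these are three separate sc_signal registers per client FSM:

    #include <cstddef>

    // Illustrative grouping of the per-client registers r_*_to_ixr_cmd_req,
    // r_*_to_ixr_cmd_put and r_*_to_ixr_cmd_index: the client FSM only posts
    // a TRT index; the IXR_CMD FSM later reads address and data from that
    // TRT entry before sending the GET or PUT transaction to the XRAM.
    struct IxrCmdRequest
    {
        bool   valid;  // r_*_to_ixr_cmd_req   : request pending
        bool   put;    // r_*_to_ixr_cmd_put   : transaction type (PUT if true, GET otherwise)
        size_t index;  // r_*_to_ixr_cmd_index : TRT entry holding address and data
    };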
trunk/modules/vci_mem_cache/caba/source/include/xram_transaction.h
(diff r422 → r489)

- TransactionTabEntry gets a new boolean config field that marks transactions requested by the CONFIG FSM; it is initialised in init(), handled by copy() and the copy constructor, displayed by print(), and set through a new optional argument of TransactionTab::set().
- A new is_config(index) accessor returns this flag for a given TRT entry.
- print() is improved: an entry header is displayed, the wdata / wdata_be values are printed in hexadecimal, and the rerror, ll_key and config fields are shown.
- All sanity checks become assertions with uniform "MEMC ERROR: ..." messages; in particular the exit(0)-based error handling of write_rsp() is replaced by assertions.
- write_rsp() no longer takes an rerror argument: it only merges one 64-bit XRAM response beat into two 32-bit words of the entry, keeping the bytes already marked as written by the byte-enables (see the sketch below).
- Miscellaneous reformatting of read(), full(), hit_read(), hit_write(), write_data_mask(), set() and erase().
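A minimal stand-alone sketch of what write_rsp() now does, assuming a 32-bit data_t and a 64-bit wide_data_t, and assuming that the class helper be_to_mask() expands each byte-enable bit into a full byte of the mask; the function names of the sketch are illustrative:

    #include <cstdint>
    #include <cstddef>
    #include <vector>

    // Expands a 4-bit byte-enable into a 32-bit mask, one byte per BE bit
    // (assumed to match the be_to_mask() helper used by TransactionTab).
    static uint32_t be_to_mask(uint32_t be)
    {
        uint32_t mask = 0;
        if (be & 0x1) mask |= 0x000000FF;
        if (be & 0x2) mask |= 0x0000FF00;
        if (be & 0x4) mask |= 0x00FF0000;
        if (be & 0x8) mask |= 0xFF000000;
        return mask;
    }

    // One 64-bit XRAM beat fills two 32-bit words of the TRT entry, but the
    // bytes already written by the processor (BE bit set) are preserved.
    void write_rsp_sketch(std::vector<uint32_t>&       wdata,
                          const std::vector<uint32_t>& wdata_be,
                          size_t                       word,
                          uint64_t                     data)
    {
        uint32_t mask   = be_to_mask(wdata_be[word]);
        wdata[word]     = (wdata[word] & mask) | ((uint32_t)data & ~mask);

        mask            = be_to_mask(wdata_be[word + 1]);
        wdata[word + 1] = (wdata[word + 1] & mask) | ((uint32_t)(data >> 32) & ~mask);
    }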
trunk/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
r483 r489 45 45 #define DEBUG_MEMC_WRITE 1 // detailed trace of WRITE FSM 46 46 #define DEBUG_MEMC_CAS 1 // detailed trace of CAS FSM 47 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_ RSPFSM47 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_CMD FSM 48 48 #define DEBUG_MEMC_IXR_RSP 1 // detailed trace of IXR_RSP FSM 49 49 #define DEBUG_MEMC_XRAM_RSP 1 // detailed trace of XRAM_RSP FSM … … 124 124 "MULTI_ACK_UPT_LOCK", 125 125 "MULTI_ACK_UPT_CLEAR", 126 "MULTI_ACK_WRITE_RSP", 127 "MULTI_ACK_CONFIG_ACK" 126 "MULTI_ACK_WRITE_RSP" 128 127 }; 129 128 const char *config_fsm_str[] = … … 131 130 "CONFIG_IDLE", 132 131 "CONFIG_LOOP", 132 "CONFIG_WAIT", 133 133 "CONFIG_RSP", 134 134 "CONFIG_DIR_REQ", 135 135 "CONFIG_DIR_ACCESS", 136 "CONFIG_ DIR_IVT_LOCK",136 "CONFIG_IVT_LOCK", 137 137 "CONFIG_BC_SEND", 138 "CONFIG_BC_WAIT", 139 "CONFIG_INV_SEND", 138 "CONFIG_INVAL_SEND", 140 139 "CONFIG_HEAP_REQ", 141 140 "CONFIG_HEAP_SCAN", 142 141 "CONFIG_HEAP_LAST", 143 "CONFIG_INV_WAIT" 142 "CONFIG_TRT_LOCK", 143 "CONFIG_TRT_SET", 144 "CONFIG_PUT_REQ" 144 145 }; 145 146 const char *read_fsm_str[] = … … 165 166 "WRITE_DIR_REQ", 166 167 "WRITE_DIR_LOCK", 167 "WRITE_DIR_READ",168 168 "WRITE_DIR_HIT", 169 169 "WRITE_UPT_LOCK", … … 177 177 "WRITE_MISS_TRT_SET", 178 178 "WRITE_MISS_XRAM_REQ", 179 "WRITE_BC_DIR_READ", 179 180 "WRITE_BC_TRT_LOCK", 180 181 "WRITE_BC_IVT_LOCK", … … 187 188 { 188 189 "IXR_RSP_IDLE", 189 "IXR_RSP_ACK",190 190 "IXR_RSP_TRT_ERASE", 191 191 "IXR_RSP_TRT_READ" … … 199 199 "XRAM_RSP_DIR_UPDT", 200 200 "XRAM_RSP_DIR_RSP", 201 "XRAM_RSP_I NVAL_LOCK",201 "XRAM_RSP_IVT_LOCK", 202 202 "XRAM_RSP_INVAL_WAIT", 203 203 "XRAM_RSP_INVAL", … … 215 215 "IXR_CMD_CAS_IDLE", 216 216 "IXR_CMD_XRAM_IDLE", 217 "IXR_CMD_READ", 218 "IXR_CMD_WRITE", 219 "IXR_CMD_CAS", 220 "IXR_CMD_XRAM" 217 "IXR_CMD_CONFIG_IDLE", 218 "IXR_CMD_READ_TRT", 219 "IXR_CMD_WRITE_TRT", 220 "IXR_CMD_CAS_TRT", 221 "IXR_CMD_XRAM_TRT", 222 "IXR_CMD_CONFIG_TRT", 223 "IXR_CMD_READ_SEND", 224 "IXR_CMD_WRITE_SEND", 225 "IXR_CMD_CAS_SEND", 226 "IXR_CMD_XRAM_SEND", 227 "IXR_CMD_CONFIG_SEND" 221 228 }; 222 229 const char *cas_fsm_str[] = … … 260 267 "CLEANUP_IVT_CLEAR", 261 268 "CLEANUP_WRITE_RSP", 262 "CLEANUP_CONFIG_ACK",263 269 "CLEANUP_SEND_CLACK" 264 270 }; … … 279 285 "ALLOC_TRT_CAS", 280 286 "ALLOC_TRT_XRAM_RSP", 281 "ALLOC_TRT_IXR_RSP" 287 "ALLOC_TRT_IXR_RSP", 288 "ALLOC_TRT_CONFIG", 289 "ALLOC_TRT_IXR_CMD" 282 290 }; 283 291 const char *alloc_upt_fsm_str[] = … … 380 388 m_broadcast_boundaries(0x7C1F), 381 389 382 r_tgt_cmd_fsm("r_tgt_cmd_fsm"),383 390 384 391 // FIFOs … … 407 414 m_cc_receive_to_multi_ack_fifo("m_cc_receive_to_multi_ack_fifo", 4), 408 415 416 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), 417 409 418 r_config_fsm( "r_config_fsm" ), 410 419 … … 418 427 m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo",8), 419 428 m_write_to_cc_send_srcid_fifo("m_write_to_cc_send_srcid_fifo",8), 420 #if L1_MULTI_CACHE421 m_write_to_cc_send_cache_id_fifo("m_write_to_cc_send_cache_id_fifo",8),422 #endif423 429 424 430 r_multi_ack_fsm("r_multi_ack_fsm"), … … 430 436 m_cas_to_cc_send_inst_fifo("m_cas_to_cc_send_inst_fifo",8), 431 437 m_cas_to_cc_send_srcid_fifo("m_cas_to_cc_send_srcid_fifo",8), 432 #if L1_MULTI_CACHE433 m_cas_to_cc_send_cache_id_fifo("m_cas_to_cc_send_cache_id_fifo",8),434 #endif435 438 436 439 r_ixr_rsp_fsm("r_ixr_rsp_fsm"), … … 439 442 m_xram_rsp_to_cc_send_inst_fifo("m_xram_rsp_to_cc_send_inst_fifo",8), 440 443 m_xram_rsp_to_cc_send_srcid_fifo("m_xram_rsp_to_cc_send_srcid_fifo",8), 441 #if L1_MULTI_CACHE442 
m_xram_rsp_to_cc_send_cache_id_fifo("m_xram_rsp_to_cc_send_cache_id_fifo",8),443 #endif444 444 445 445 r_ixr_cmd_fsm("r_ixr_cmd_fsm"), … … 509 509 r_xram_rsp_victim_data = new sc_signal<data_t>[nwords]; 510 510 r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 511 r_xram_rsp_to_ixr_cmd_data = new sc_signal<data_t>[nwords];512 511 513 512 // Allocation for READ FSM … … 520 519 r_write_to_cc_send_data = new sc_signal<data_t>[nwords]; 521 520 r_write_to_cc_send_be = new sc_signal<be_t>[nwords]; 522 r_write_to_ixr_cmd_data = new sc_signal<data_t>[nwords];523 521 524 522 // Allocation for CAS FSM 525 r_cas_to_ixr_cmd_data = new sc_signal<data_t>[nwords];526 523 r_cas_data = new sc_signal<data_t>[nwords]; 527 524 r_cas_rdata = new sc_signal<data_t>[2]; 528 525 526 // Allocation for IXR_CMD FSM 527 r_ixr_cmd_wdata = new sc_signal<data_t>[nwords]; 528 529 529 // Allocation for debug 530 m_debug_previous_data = new sc_signal<data_t>[nwords];531 m_debug_data = new sc_signal<data_t>[nwords];530 m_debug_previous_data = new data_t[nwords]; 531 m_debug_data = new data_t[nwords]; 532 532 533 533 SC_METHOD(transition); … … 540 540 } // end constructor 541 541 542 ///////////////////////////////////////////////////////////////////////543 tmpl(void) ::start_monitor(addr_t addr, addr_t length)544 ///////////////////////////////////////////////////////////////////////545 {546 m_monitor_ok = true;547 m_monitor_base = addr;548 m_monitor_length = length;549 }550 551 ///////////////////////////////////////////////////////////////////////552 tmpl(void) ::stop_monitor()553 ///////////////////////////////////////////////////////////////////////554 {555 m_monitor_ok = false;556 }557 558 ////////////////////////////////////////////////559 tmpl(void) ::check_monitor( addr_t addr,560 data_t data,561 bool read )562 ////////////////////////////////////////////////563 {564 if((addr >= m_monitor_base) and565 (addr < m_monitor_base + m_monitor_length))566 {567 if ( read ) std::cout << " Monitor MEMC Read ";568 else std::cout << " Monitor MEMC Write";569 std::cout << " / Address = " << std::hex << addr570 << " / Data = " << data571 << " at cycle " << std::dec << m_cpt_cycles << std::endl;572 }573 }574 542 575 543 ///////////////////////////////////////////////////// … … 581 549 DirectoryEntry entry = m_cache_directory.read_neutral(addr, &way, &set ); 582 550 551 // read data and compute data_change 583 552 bool data_change = false; 584 585 553 if ( entry.valid ) 586 554 { 587 m_cache_data.read_line( way, set, m_debug_data );588 589 for ( size_t i = 0 ; i<m_words ; i++ )590 {591 if ( m_debug_previous_valid and592 (m_debug_data[i].read() != m_debug_previous_data[i].read()) )593 594 m_debug_previous_data[i] = m_debug_data[i].read();555 for ( size_t word = 0 ; word<m_words ; word++ ) 556 { 557 m_debug_data[word] = m_cache_data.read(way, set, word); 558 if ( m_debug_previous_valid and 559 (m_debug_data[word] != m_debug_previous_data[word]) ) 560 { 561 data_change = true; 562 } 595 563 } 596 564 } 597 565 566 // print values if any change 598 567 if ( (entry.valid != m_debug_previous_valid) or 599 568 (entry.valid and (entry.count != m_debug_previous_count)) or … … 603 572 << " at cycle " << std::dec << m_cpt_cycles 604 573 << " for address " << std::hex << addr 605 << " / HIT= " << std::dec << entry.valid574 << " / VAL = " << std::dec << entry.valid 606 575 << " / WAY = " << way 607 576 << " / COUNT = " << entry.count 608 577 << " / DIRTY = " << entry.dirty 609 << " / DATA_CHANGE = " << entry.count578 << " / DATA_CHANGE 
= " << data_change 610 579 << std::endl; 611 } 580 std::cout << std::hex << " /0:" << m_debug_data[0] 581 << "/1:" << m_debug_data[1] 582 << "/2:" << m_debug_data[2] 583 << "/3:" << m_debug_data[3] 584 << "/4:" << m_debug_data[4] 585 << "/5:" << m_debug_data[5] 586 << "/6:" << m_debug_data[6] 587 << "/7:" << m_debug_data[7] 588 << "/8:" << m_debug_data[8] 589 << "/9:" << m_debug_data[9] 590 << "/A:" << m_debug_data[10] 591 << "/B:" << m_debug_data[11] 592 << "/C:" << m_debug_data[12] 593 << "/D:" << m_debug_data[13] 594 << "/E:" << m_debug_data[14] 595 << "/F:" << m_debug_data[15] 596 << std::endl; 597 } 598 599 // register values 612 600 m_debug_previous_count = entry.count; 613 601 m_debug_previous_valid = entry.valid; 614 602 m_debug_previous_dirty = entry.dirty; 603 for( size_t word=0 ; word<m_words ; word++ ) 604 m_debug_previous_data[word] = m_debug_data[word]; 615 605 } 616 606 … … 677 667 delete [] r_xram_rsp_victim_data; 678 668 delete [] r_xram_rsp_to_tgt_rsp_data; 679 delete [] r_xram_rsp_to_ixr_cmd_data;680 669 681 670 delete [] r_read_data; … … 755 744 m_config_to_cc_send_inst_fifo.init(); 756 745 m_config_to_cc_send_srcid_fifo.init(); 757 #if L1_MULTI_CACHE758 m_config_to_cc_send_cache_id_fifo.init();759 #endif760 746 761 747 r_tgt_cmd_to_tgt_rsp_req = false; … … 772 758 m_write_to_cc_send_inst_fifo.init(); 773 759 m_write_to_cc_send_srcid_fifo.init(); 774 #if L1_MULTI_CACHE775 m_write_to_cc_send_cache_id_fifo.init();776 #endif777 760 778 761 r_cleanup_to_tgt_rsp_req = false; … … 780 763 m_cc_receive_to_cleanup_fifo.init(); 781 764 782 r_multi_ack_to_tgt_rsp_req 765 r_multi_ack_to_tgt_rsp_req = false; 783 766 784 767 m_cc_receive_to_multi_ack_fifo.init(); … … 788 771 r_cas_lfsr = -1 ; 789 772 r_cas_to_ixr_cmd_req = false; 790 r_cas_to_cc_send_multi_req = false;791 r_cas_to_cc_send_brdcast_req = false;773 r_cas_to_cc_send_multi_req = false; 774 r_cas_to_cc_send_brdcast_req = false; 792 775 793 776 m_cas_to_cc_send_inst_fifo.init(); 794 777 m_cas_to_cc_send_srcid_fifo.init(); 795 #if L1_MULTI_CACHE796 m_cas_to_cc_send_cache_id_fifo.init();797 #endif798 778 799 779 for(size_t i=0; i<m_trt_lines ; i++) … … 810 790 m_xram_rsp_to_cc_send_inst_fifo.init(); 811 791 m_xram_rsp_to_cc_send_srcid_fifo.init(); 812 #if L1_MULTI_CACHE 813 m_xram_rsp_to_cc_send_cache_id_fifo.init(); 814 #endif 815 816 r_ixr_cmd_cpt = 0; 792 817 793 r_alloc_dir_reset_cpt = 0; 818 794 r_alloc_heap_reset_cpt = 0; … … 863 839 size_t write_to_cc_send_fifo_srcid = 0; 864 840 865 #if L1_MULTI_CACHE866 size_t write_to_cc_send_fifo_cache_id = 0;867 #endif868 869 841 bool xram_rsp_to_cc_send_fifo_put = false; 870 842 bool xram_rsp_to_cc_send_fifo_get = false; … … 872 844 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 873 845 874 #if L1_MULTI_CACHE875 size_t xram_rsp_to_cc_send_fifo_cache_id = 0;876 #endif877 878 846 bool config_to_cc_send_fifo_put = false; 879 847 bool config_to_cc_send_fifo_get = false; … … 885 853 bool cas_to_cc_send_fifo_inst = false; 886 854 size_t cas_to_cc_send_fifo_srcid = 0; 887 888 #if L1_MULTI_CACHE889 size_t cas_to_cc_send_fifo_cache_id = 0;890 #endif891 855 892 856 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; … … 939 903 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 
940 904 //////////////////////////////////////////////////////////////////////////////////// 905 906 //std::cout << std::endl << "tgt_cmd_fsm" << std::endl; 941 907 942 908 switch(r_tgt_cmd_fsm.read()) … … 1042 1008 case TGT_CMD_ERROR: // response error must be sent 1043 1009 1044 // wait if pending TGT_CMD request to TGT_RSP FSM1010 // wait if pending request 1045 1011 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1046 1012 … … 1076 1042 size_t error; 1077 1043 uint32_t rdata = 0; // default value 1044 uint32_t wdata = p_vci_tgt.wdata.read(); 1078 1045 1079 1046 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock … … 1084 1051 error = 0; 1085 1052 r_config_lock = true; 1053 if ( rdata == 0 ) 1054 { 1055 r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1056 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1057 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1058 } 1086 1059 } 1087 1060 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1088 and (cell == MEMC_LOCK) ) 1061 and (cell == MEMC_LOCK) 1062 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1089 1063 { 1090 1064 need_rsp = true; … … 1093 1067 } 1094 1068 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1095 and (cell == MEMC_ADDR_LO) ) 1096 { 1069 and (cell == MEMC_ADDR_LO) 1070 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1071 { 1072 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1073 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1074 1097 1075 need_rsp = true; 1098 1076 error = 0; … … 1101 1079 } 1102 1080 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1103 and (cell == MEMC_ADDR_HI) ) 1081 and (cell == MEMC_ADDR_HI) 1082 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1083 1104 1084 { 1105 1085 need_rsp = true; … … 1109 1089 } 1110 1090 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1111 and (cell == MEMC_BUF_LENGTH) ) 1091 and (cell == MEMC_BUF_LENGTH) 1092 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1112 1093 { 1113 1094 need_rsp = true; 1114 1095 error = 0; 1115 1096 size_t lines = (size_t)(p_vci_tgt.wdata.read()/(m_words<<2)); 1116 if ( r_config_address.read()/(m_words*vci_param_int::B) ) lines++; 1117 r_config_nlines = lines; 1097 if ( r_config_address.read()%(m_words*4) ) lines++; 1098 r_config_cmd_lines = lines; 1099 r_config_rsp_lines = lines; 1118 1100 } 1119 1101 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1120 and (cell == MEMC_CMD_TYPE) ) 1102 and (cell == MEMC_CMD_TYPE) 1103 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1121 1104 { 1122 1105 need_rsp = false; 1123 1106 error = 0; 1124 1107 r_config_cmd = p_vci_tgt.wdata.read(); 1108 1109 // prepare delayed response from CONFIG FSM 1125 1110 r_config_srcid = p_vci_tgt.srcid.read(); 1126 1111 r_config_trdid = p_vci_tgt.trdid.read(); … … 1153 1138 << " address = " << std::hex << p_vci_tgt.address.read() 1154 1139 << " / wdata = " << p_vci_tgt.wdata.read() 1140 << " / need_rsp = " << need_rsp 1155 1141 << " / error = " << error << std::endl; 1156 1142 #endif … … 1256 1242 // MULTI_ACK FSM 1257 1243 ///////////////////////////////////////////////////////////////////////// 1258 // This FSM controls the response to the multicast update or multicast 1259 // inval coherence requests sent by the memory cache to the L1 caches and 1260 // update the UPT. 
1244 // This FSM controls the response to the multicast update requests sent 1245 // by the memory cache to the L1 caches and update the UPT. 1261 1246 // 1262 1247 // - The FSM decrements the proper entry in UPT, … … 1264 1249 // - If required, it sends a request to the TGT_RSP FSM to complete 1265 1250 // a pending write transaction. 1266 // - If required, it sends an acknowledge to the CONFIG FSM to signal1267 // completion of a line inval.1268 1251 // 1269 1252 // All those multi-ack packets are one flit packet. 1270 // The index in the UPT is defined in the UPDTID field.1253 // The index in the UPT is defined in the TRDID field. 1271 1254 //////////////////////////////////////////////////////////////////////// 1255 1256 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1272 1257 1273 1258 switch(r_multi_ack_fsm.read()) … … 1381 1366 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1382 1367 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1383 bool need_ack = m_upt.need_ack(r_multi_ack_upt_index.read());1384 1368 1385 1369 // clear the UPT entry … … 1387 1371 1388 1372 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1389 else if ( need_ack ) r_multi_ack_fsm = MULTI_ACK_CONFIG_ACK;1390 1373 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1391 1374 … … 1418 1401 break; 1419 1402 } 1420 //////////////////////////1421 case MULTI_ACK_CONFIG_ACK: // Signals multi-inval completion to CONFIG FSM1422 // Wait if pending request1423 {1424 if ( r_multi_ack_to_config_ack.read() ) break;1425 1426 r_multi_ack_to_config_ack = true;1427 r_multi_ack_fsm = MULTI_ACK_IDLE;1428 1429 #if DEBUG_MEMC_MULTI_ACK1430 if(m_debug)1431 std::cout << " <MEMC " << name() << " MULTI_ACK_CONFIG_ACK>"1432 << " Signals inval completion to CONFIG FSM" << std::endl;1433 #endif1434 break;1435 }1436 1403 } // end switch r_multi_ack_fsm 1437 1404 … … 1441 1408 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1442 1409 // The target buffer can have any size, and there is one single command for 1443 // all cache lines covered by the target buffer. 1444 // An INVAL or SYNC configuration request is defined by the followinf registers: 1445 // - bool r_config_cmd : INVAL / SYNC / NOP) 1410 // all cache lines covered by the target buffer. 1411 // 1412 // An INVAL or SYNC configuration operation is defined by the following registers: 1413 // - bool r_config_cmd : INVAL / SYNC / NOP 1446 1414 // - uint64_t r_config_address : buffer base address 1447 // - uint32_t r_config_nlines : number of lines covering buffer 1415 // - uint32_t r_config_cmd_lines : number of lines to be handled 1416 // - uint32_t r_config_rsp_lines : number of lines not completed 1448 1417 // 1449 1418 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1450 // all cache lines covered by the target buffer. 1451 // 1419 // all cache lines covered by the buffer. The various lines of a given buffer 1420 // can be pipelined: the CONFIG FSM does not wait the response for line (n) to send 1421 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1422 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1423 // 1452 1424 // - INVAL request: 1453 // For each line, it access to the DIR array.1425 // For each line, it access to the DIR. 1454 1426 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 
1455 1427 // In case of hit, with no copies in L1 caches, the line is invalidated and 1456 1428 // a response is requested to TGT_RSP FSM. 1457 1429 // If there is copies, a multi-inval, or a broadcast-inval coherence transaction 1458 // is launched and registered in UPT. The multi-inval transaction is signaled 1459 // by the r_multi_ack_to config_ack or r_cleanup_to_config_ack flip-flops. 1460 // The config inval response is sent only when the last line has been invalidated. 1461 // 1430 // is launched and registered in UPT. The multi-inval transaction completion 1431 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1432 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1433 // TODO : The target buffer address must be aligned on a cache line boundary. 1434 // This constraint can be released, but it requires to make 2 PUT transactions 1435 // for the first and the last line... 1436 // 1462 1437 // - SYNC request: 1463 // 1464 // ... Not implemented yet ... 1438 // For each line, it access to the DIR. 1439 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1440 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1441 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1442 // when a PUT response is received. 1443 // The CONFIG SYNC response is sent only when the last PUT response is received. 1465 1444 // 1466 1445 // From the software point of view, a configuration request is a sequence 1467 // of 6 atomic accesses in an uncached segment: 1446 // of 6 atomic accesses in an uncached segment. A dedicated lock is used 1447 // to handle only one configuration command at a given time: 1468 1448 // - Read MEMC_LOCK : Get the lock 1469 1449 // - Write MEMC_ADDR_LO : Set the buffer address LSB … … 1474 1454 //////////////////////////////////////////////////////////////////////////////////// 1475 1455 1456 //std::cout << std::endl << "config_fsm" << std::endl; 1457 1476 1458 switch( r_config_fsm.read() ) 1477 1459 { … … 1486 1468 if(m_debug) 1487 1469 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1488 << " address = " << std::hex << r_config_address.read()1489 << " / nlines = " << std::dec << r_config_nlines.read()1470 << " / address = " << std::hex << r_config_address.read() 1471 << " / lines = " << std::dec << r_config_cmd_lines.read() 1490 1472 << " / type = " << r_config_cmd.read() << std::endl; 1491 1473 #endif … … 1494 1476 } 1495 1477 ///////////////// 1496 case CONFIG_LOOP: // test last line1497 { 1498 if ( r_config_ nlines.read() == 0 )1478 case CONFIG_LOOP: // test if last line to be handled 1479 { 1480 if ( r_config_cmd_lines.read() == 0 ) 1499 1481 { 1500 1482 r_config_cmd = MEMC_CMD_NOP; 1501 r_config_fsm = CONFIG_ RSP;1483 r_config_fsm = CONFIG_WAIT; 1502 1484 } 1503 1485 else … … 1509 1491 if(m_debug) 1510 1492 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1511 << " address = " << std::hex << r_config_address.read()1512 << " / nlines = " << std::dec << r_config_nlines.read()1493 << " / address = " << std::hex << r_config_address.read() 1494 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 1513 1495 << " / command = " << r_config_cmd.read() << std::endl; 1514 1496 #endif 1515 1497 break; 1498 } 1499 ///////////////// 1500 case CONFIG_WAIT: // wait completion (last response) 1501 { 1502 if ( r_config_rsp_lines.read() == 0 ) // last response received 1503 { 1504 r_config_fsm = 
CONFIG_RSP; 1505 } 1506 1507 #if DEBUG_MEMC_CONFIG 1508 if(m_debug) 1509 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1510 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1511 #endif 1512 break; 1513 } 1514 //////////////// 1515 case CONFIG_RSP: // request TGT_RSP FSM to return response 1516 { 1517 if ( not r_config_to_tgt_rsp_req.read() ) 1518 { 1519 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1520 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1521 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1522 r_config_to_tgt_rsp_error = false; 1523 r_config_to_tgt_rsp_req = true; 1524 r_config_fsm = CONFIG_IDLE; 1525 1526 #if DEBUG_MEMC_CONFIG 1527 if(m_debug) 1528 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1529 << " error = " << r_config_to_tgt_rsp_error.read() 1530 << " / rsrcid = " << std::hex << r_config_srcid.read() 1531 << " / rtrdid = " << std::hex << r_config_trdid.read() 1532 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1533 #endif 1534 } 1535 break; 1536 1516 1537 } 1517 1538 //////////////////// … … 1533 1554 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1534 1555 { 1556 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1557 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1558 1535 1559 size_t way = 0; 1536 1560 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); … … 1543 1567 r_config_dir_copy_srcid = entry.owner.srcid; 1544 1568 r_config_dir_is_cnt = entry.is_cnt; 1569 r_config_dir_lock = entry.lock; 1545 1570 r_config_dir_count = entry.count; 1546 r_config_dir_ next_ptr= entry.ptr;1547 1548 r_config_fsm = CONFIG_ DIR_IVT_LOCK;1571 r_config_dir_ptr = entry.ptr; 1572 1573 r_config_fsm = CONFIG_IVT_LOCK; 1549 1574 } 1550 1575 else if ( entry.valid and // hit & sync command … … 1552 1577 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1553 1578 { 1554 std::cout << "VCI_MEM_CACHE ERROR: " 1555 << "SYNC config request not implemented yet" << std::endl; 1556 exit(0); 1579 r_config_fsm = CONFIG_TRT_LOCK; 1557 1580 } 1558 else // return to LOOP1581 else // miss : return to LOOP 1559 1582 { 1560 r_config_nlines = r_config_nlines.read() - 1; 1561 r_config_address = r_config_address.read() + (m_words<<2); 1562 r_config_fsm = CONFIG_LOOP; 1583 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1584 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1585 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1586 r_config_address = r_config_address.read() + (m_words<<2); 1587 r_config_fsm = CONFIG_LOOP; 1563 1588 } 1564 1589 … … 1574 1599 break; 1575 1600 } 1576 ///////////////////////// 1577 case CONFIG_DIR_IVT_LOCK: // enter this state in case of INVAL command 1578 // Try to get both DIR & IVT locks, and return 1579 // to LOOP state if IVT full. 1580 // Register inval in IVT, and invalidate the 1581 // directory if IVT not full. 1582 { 1601 ///////////////////// 1602 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1603 // to a dirty cache line 1604 // keep DIR lock, and try to get TRT lock 1605 // return to LOOP state if TRT full 1606 // reset dirty bit in DIR and register a PUT 1607 // trabsaction in TRT if not full. 
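The two counters introduced here work as a small producer/consumer hand-shake: r_config_cmd_lines is decremented each time the command for one more line has been issued (or the line missed in the directory), while r_config_rsp_lines is decremented by the CLEANUP FSM (INVAL) or the IXR_RSP FSM (SYNC) when a line completes, and the single delayed VCI response is sent only once it reaches zero. A minimal software model of that hand-shake, assuming hypothetical issue/complete hooks rather than the real FSM interfaces, is sketched below.

    // Abstract model of the pipelined CONFIG loop (not the RTL): the command
    // for line (n+1) may be issued before line (n) has completed.
    #include <cstddef>
    #include <cassert>

    struct ConfigOpModel
    {
        size_t cmd_lines;   // lines whose command has not been issued yet
        size_t rsp_lines;   // lines whose completion has not been signalled yet

        explicit ConfigOpModel(size_t nlines) : cmd_lines(nlines), rsp_lines(nlines) {}

        // CONFIG_LOOP side: issue the command for one more line.
        bool issue_one()
        {
            if (cmd_lines == 0) return false;   // all commands already issued
            --cmd_lines;                        // r_config_cmd_lines - 1
            return true;
        }

        // CLEANUP (inval) or IXR_RSP (sync) side: one line fully handled.
        void complete_one()
        {
            assert(rsp_lines > 0);
            --rsp_lines;                        // r_config_rsp_lines - 1
        }

        // CONFIG_WAIT: the delayed VCI response may be returned only now.
        bool response_ready() const { return rsp_lines == 0; }
    };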
1608 { 1609 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1610 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1611 1612 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1613 { 1614 size_t index = 0; 1615 bool wok = not m_trt.full(index); 1616 1617 if ( not wok ) 1618 { 1619 r_config_fsm = CONFIG_LOOP; 1620 } 1621 else 1622 { 1623 size_t way = r_config_dir_way.read(); 1624 size_t set = m_y[r_config_address.read()]; 1625 1626 // reset dirty bit in DIR 1627 DirectoryEntry entry; 1628 entry.valid = true; 1629 entry.dirty = false; 1630 entry.tag = m_z[r_config_address.read()]; 1631 entry.is_cnt = r_config_dir_is_cnt.read(); 1632 entry.lock = r_config_dir_lock.read(); 1633 entry.ptr = r_config_dir_ptr.read(); 1634 entry.count = r_config_dir_count.read(); 1635 entry.owner.inst = r_config_dir_copy_inst.read(); 1636 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1637 m_cache_directory.write( set, 1638 way, 1639 entry ); 1640 1641 r_config_trt_index = index; 1642 r_config_fsm = CONFIG_TRT_SET; 1643 } 1644 1645 #if DEBUG_MEMC_CONFIG 1646 if(m_debug) 1647 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1648 << " wok = " << std::dec << wok 1649 << " index = " << index << std::endl; 1650 #endif 1651 } 1652 break; 1653 } 1654 //////////////////// 1655 case CONFIG_TRT_SET: // read data in cache 1656 // and post a PUT request in TRT 1657 { 1658 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1659 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1660 1661 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1662 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1663 1664 // read data into cache 1665 size_t way = r_config_dir_way.read(); 1666 size_t set = m_y[r_config_address.read()]; 1667 1668 sc_signal<data_t> config_data[16]; 1669 m_cache_data.read_line( way, 1670 set, 1671 config_data ); 1672 1673 // post a PUT request in TRT 1674 std::vector<data_t> data_vector; 1675 data_vector.clear(); 1676 for(size_t i=0; i<m_words; i++) data_vector.push_back(config_data[i].read()); 1677 m_trt.set( r_config_trt_index.read(), 1678 false, // PUT 1679 m_nline[r_config_address.read()], // nline 1680 0, // srcid: unused 1681 0, // trdid: unused 1682 0, // pktid: unused 1683 false, // not proc_read 1684 0, // read_length: unused 1685 0, // word_index: unused 1686 std::vector<be_t>(m_words,0xF), 1687 data_vector); 1688 1689 #if DEBUG_MEMC_CONFIG 1690 if(m_debug) 1691 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1692 << " address = " << std::hex << r_config_address.read() 1693 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1694 #endif 1695 break; 1696 } 1697 //////////////////// 1698 case CONFIG_PUT_REQ: // PUT request to IXR_CMD_FSM 1699 { 1700 if ( not r_config_to_ixr_cmd_req.read() ) 1701 { 1702 r_config_to_ixr_cmd_req = true; 1703 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1704 1705 // prepare next iteration 1706 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1707 r_config_address = r_config_address.read() + (m_words<<2); 1708 r_config_fsm = CONFIG_LOOP; 1709 1710 #if DEBUG_MEMC_CONFIG 1711 if(m_debug) 1712 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> PUT request to IXR_CMD_FSM" 1713 << " / address = " << std::hex << r_config_address.read() << std::endl; 1714 #endif 1715 } 1716 break; 1717 } 1718 ///////////////////// 1719 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1720 // Keep DIR lock and Try to get IVT lock. 
1721 // Return to LOOP state if IVT full. 1722 // Register inval in IVT, and invalidate the 1723 // directory if IVT not full. 1724 { 1725 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1726 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1727 1583 1728 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1584 1729 { … … 1589 1734 { 1590 1735 m_cache_directory.inval( way, set ); 1591 r_config_nlines = r_config_nlines.read() - 1; 1592 r_config_address = r_config_address.read() + (m_words<<2); 1593 r_config_fsm = CONFIG_LOOP; 1736 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1737 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1738 r_config_address = r_config_address.read() + (m_words<<2); 1739 r_config_fsm = CONFIG_LOOP; 1594 1740 1595 1741 #if DEBUG_MEMC_CONFIG 1596 1742 if(m_debug) 1597 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1743 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1598 1744 << " No copies in L1 : inval DIR entry" << std::endl; 1599 1745 #endif … … 1626 1772 r_config_ivt_index = index; 1627 1773 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1628 else r_config_fsm = CONFIG_INV _SEND;1774 else r_config_fsm = CONFIG_INVAL_SEND; 1629 1775 1630 1776 #if DEBUG_MEMC_CONFIG 1631 1777 if(m_debug) 1632 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1778 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1633 1779 << " Inval DIR entry and register inval in IVT" 1634 << " :index = " << std::dec << index1780 << " / index = " << std::dec << index 1635 1781 << " / broadcast = " << broadcast << std::endl; 1636 1782 #endif … … 1642 1788 #if DEBUG_MEMC_CONFIG 1643 1789 if(m_debug) 1644 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1790 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1645 1791 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1646 1792 #endif … … 1656 1802 not r_config_to_cc_send_brdcast_req.read() ) 1657 1803 { 1804 // post bc inval request 1658 1805 r_config_to_cc_send_multi_req = false; 1659 1806 r_config_to_cc_send_brdcast_req = true; 1660 1807 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1661 1808 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1662 r_cleanup_to_config_ack = false; 1663 r_config_fsm = CONFIG_BC_WAIT; 1809 1810 // prepare next iteration 1811 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1812 r_config_address = r_config_address.read() + (m_words<<2); 1813 r_config_fsm = CONFIG_LOOP; 1664 1814 1665 1815 #if DEBUG_MEMC_CONFIG … … 1672 1822 break; 1673 1823 } 1674 //////////////////// 1675 case CONFIG_BC_WAIT: // wait broadcast completion to return to LOOP 1676 { 1677 if ( r_cleanup_to_config_ack.read() ) 1678 { 1679 r_config_fsm = CONFIG_LOOP; 1680 r_config_nlines = r_config_nlines.read() - 1; 1681 r_config_address = r_config_address.read() + (m_words<<2); 1682 } 1683 1684 #if DEBUG_MEMC_CONFIG 1685 if(m_debug) 1686 std::cout << " <MEMC " << name() << " CONFIG_BC_WAIT> Waiting BC completion " 1687 << " done = " << r_cleanup_to_config_ack.read() 1688 << std::endl; 1689 #endif 1690 break; 1691 } 1692 ///////////////////// 1693 case CONFIG_INV_SEND: // Post a multi inval request to CC_SEND FSM 1824 /////////////////////// 1825 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 1694 1826 { 1695 1827 if( not r_config_to_cc_send_multi_req.read() and 1696 1828 not r_config_to_cc_send_brdcast_req.read() ) 1697 1829 { 1830 // post multi inval request 1698 1831 r_config_to_cc_send_multi_req = true; 
1699 1832 r_config_to_cc_send_brdcast_req = false; 1700 1833 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1701 1834 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1702 r_multi_ack_to_config_ack = false; 1703 1835 1836 // post data into FIFO 1704 1837 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 1705 1838 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 1706 1839 config_to_cc_send_fifo_put = true; 1707 1840 1708 if ( r_config_dir_count.read() == 1 ) r_config_fsm = CONFIG_INV_WAIT; 1709 else r_config_fsm = CONFIG_HEAP_REQ; 1841 if ( r_config_dir_count.read() == 1 ) // one copy 1842 { 1843 // prepare next iteration 1844 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1845 r_config_address = r_config_address.read() + (m_words<<2); 1846 r_config_fsm = CONFIG_LOOP; 1847 } 1848 else // several copies 1849 { 1850 r_config_fsm = CONFIG_HEAP_REQ; 1851 } 1710 1852 1711 1853 #if DEBUG_MEMC_CONFIG 1712 1854 if(m_debug) 1713 std::cout << " <MEMC " << name() << " CONFIG_INV _SEND>"1855 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 1714 1856 << " Post multi inval request to CC_SEND FSM" 1715 1857 << " / address = " << std::hex << r_config_address.read() … … 1726 1868 { 1727 1869 r_config_fsm = CONFIG_HEAP_SCAN; 1728 r_config_heap_next = r_config_dir_ next_ptr.read();1870 r_config_heap_next = r_config_dir_ptr.read(); 1729 1871 } 1730 1872 … … 1773 1915 if ( m_heap.is_full() ) 1774 1916 { 1775 last_entry.next = r_config_dir_ next_ptr.read();1917 last_entry.next = r_config_dir_ptr.read(); 1776 1918 m_heap.unset_full(); 1777 1919 } … … 1781 1923 } 1782 1924 1783 m_heap.write_free_ptr( r_config_dir_ next_ptr.read() );1925 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 1784 1926 m_heap.write( r_config_heap_next.read(), last_entry ); 1785 r_config_fsm = CONFIG_INV_WAIT; 1927 1928 // prepare next iteration 1929 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1930 r_config_address = r_config_address.read() + (m_words<<2); 1931 r_config_fsm = CONFIG_LOOP; 1786 1932 1787 1933 #if DEBUG_MEMC_CONFIG … … 1791 1937 #endif 1792 1938 break; 1793 }1794 /////////////////////1795 case CONFIG_INV_WAIT: // wait inval completion to return to LOOP1796 {1797 if ( r_multi_ack_to_config_ack.read() )1798 {1799 r_config_fsm = CONFIG_LOOP;1800 r_config_nlines = r_config_nlines.read() - 1;1801 r_config_address = r_config_address.read() + (m_words<<2);1802 }1803 1804 #if DEBUG_MEMC_CONFIG1805 if(m_debug)1806 std::cout << " <MEMC " << name() << " CONFIG_INV_WAIT> Waiting inval completion "1807 << " done = " << r_multi_ack_to_config_ack.read()1808 << std::endl;1809 #endif1810 break;1811 }1812 1813 ////////////////1814 case CONFIG_RSP: // request TGT_RSP FSM to return response1815 {1816 if ( not r_config_to_tgt_rsp_req.read() )1817 {1818 r_config_to_tgt_rsp_srcid = r_config_srcid.read();1819 r_config_to_tgt_rsp_trdid = r_config_trdid.read();1820 r_config_to_tgt_rsp_pktid = r_config_pktid.read();1821 r_config_to_tgt_rsp_error = false;1822 r_config_to_tgt_rsp_req = true;1823 r_config_fsm = CONFIG_IDLE;1824 1825 #if DEBUG_MEMC_CONFIG1826 if(m_debug)1827 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:"1828 << " error = " << r_config_to_tgt_rsp_error.read()1829 << " / rsrcid = " << std::hex << r_config_srcid.read() << std::endl;1830 #endif1831 }1832 break;1833 1834 1939 } 1835 1940 } // end switch r_config_fsm … … 1858 1963 //////////////////////////////////////////////////////////////////////////////////// 1859 1964 
1965 //std::cout << std::endl << "read_fsm" << std::endl; 1966 1860 1967 switch(r_read_fsm.read()) 1861 1968 { … … 1863 1970 case READ_IDLE: // waiting a read request 1864 1971 { 1865 if(m_cmd_read_addr_fifo.rok())1866 {1972 if(m_cmd_read_addr_fifo.rok()) 1973 { 1867 1974 1868 1975 #if DEBUG_MEMC_READ 1869 if(m_debug) 1870 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 1871 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 1872 << " / srcid = " << m_cmd_read_srcid_fifo.read() 1873 << " / trdid = " << m_cmd_read_trdid_fifo.read() 1874 << " / pktid = " << m_cmd_read_pktid_fifo.read() 1875 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1876 #endif 1877 r_read_fsm = READ_DIR_REQ; 1878 } 1879 break; 1880 } 1881 1976 if(m_debug) 1977 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 1978 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 1979 << " / srcid = " << m_cmd_read_srcid_fifo.read() 1980 << " / trdid = " << m_cmd_read_trdid_fifo.read() 1981 << " / pktid = " << m_cmd_read_pktid_fifo.read() 1982 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1983 #endif 1984 r_read_fsm = READ_DIR_REQ; 1985 } 1986 break; 1987 } 1882 1988 ////////////////// 1883 1989 case READ_DIR_REQ: // Get the lock to the directory 1884 1990 { 1885 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ)1886 {1887 r_read_fsm = READ_DIR_LOCK;1888 }1991 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1992 { 1993 r_read_fsm = READ_DIR_LOCK; 1994 } 1889 1995 1890 1996 #if DEBUG_MEMC_READ … … 1892 1998 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 1893 1999 #endif 1894 break;2000 break; 1895 2001 } 1896 2002 … … 1898 2004 case READ_DIR_LOCK: // check directory for hit / miss 1899 2005 { 1900 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1901 { 2006 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2007 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2008 1902 2009 size_t way = 0; 1903 DirectoryEntry entry = 1904 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2010 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2011 1905 2012 // access the global table ONLY when we have an LL cmd 1906 2013 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 1907 2014 { 1908 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read());2015 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 1909 2016 } 1910 2017 r_read_is_cnt = entry.is_cnt; … … 1915 2022 r_read_count = entry.count; 1916 2023 r_read_copy = entry.owner.srcid; 1917 1918 #if L1_MULTI_CACHE1919 r_read_copy_cache = entry.owner.cache_id;1920 #endif1921 2024 r_read_copy_inst = entry.owner.inst; 1922 2025 r_read_ptr = entry.ptr; // pointer to the heap … … 1928 2031 if(entry.valid) // hit 1929 2032 { 1930 // test if we need to register a new copy in the heap1931 if(entry.is_cnt or (entry.count == 0) or !cached_read)1932 {1933 r_read_fsm = READ_DIR_HIT;1934 }1935 else1936 {1937 r_read_fsm = READ_HEAP_REQ;1938 }2033 // test if we need to register a new copy in the heap 2034 if(entry.is_cnt or (entry.count == 0) or !cached_read) 2035 { 2036 r_read_fsm = READ_DIR_HIT; 2037 } 2038 else 2039 { 2040 r_read_fsm = READ_HEAP_REQ; 2041 } 1939 2042 } 1940 2043 else // miss 1941 2044 { 1942 r_read_fsm = READ_TRT_LOCK;2045 r_read_fsm = READ_TRT_LOCK; 1943 2046 } 1944 2047 … … 1955 2058 } 1956 2059 #endif 1957 } 1958 else 1959 { 1960 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_DIR_LOCK 
state" 1961 << "Bad DIR allocation" << std::endl; 1962 exit(0); 1963 } 1964 break; 1965 } 1966 2060 break; 2061 } 1967 2062 ////////////////// 1968 2063 case READ_DIR_HIT: // read data in cache & update the directory … … 1973 2068 1974 2069 { 1975 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1976 { 2070 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2071 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2072 1977 2073 // check if this is an instruction read, this means pktid is either 1978 2074 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding … … 1991 2087 m_cache_data.read_line(way, set, r_read_data); 1992 2088 1993 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true);1994 1995 2089 // update the cache directory 1996 2090 DirectoryEntry entry; … … 2004 2098 if(cached_read) // Cached read => we must update the copies 2005 2099 { 2006 if(!is_cnt) // Not counter mode 2007 { 2008 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2009 #if L1_MULTI_CACHE 2010 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 2011 #endif 2012 entry.owner.inst = inst_read; 2013 entry.count = r_read_count.read() + 1; 2014 } 2015 else // Counter mode 2016 { 2017 entry.owner.srcid = 0; 2018 #if L1_MULTI_CACHE 2019 entry.owner.cache_id = 0; 2020 #endif 2021 entry.owner.inst = false; 2022 entry.count = r_read_count.read() + 1; 2023 } 2100 if(!is_cnt) // Not counter mode 2101 { 2102 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2103 entry.owner.inst = inst_read; 2104 entry.count = r_read_count.read() + 1; 2105 } 2106 else // Counter mode 2107 { 2108 entry.owner.srcid = 0; 2109 entry.owner.inst = false; 2110 entry.count = r_read_count.read() + 1; 2111 } 2024 2112 } 2025 2113 else // Uncached read 2026 2114 { 2027 entry.owner.srcid = r_read_copy.read(); 2028 #if L1_MULTI_CACHE 2029 entry.owner.cache_id = r_read_copy_cache.read(); 2030 #endif 2031 entry.owner.inst = r_read_copy_inst.read(); 2032 entry.count = r_read_count.read(); 2115 entry.owner.srcid = r_read_copy.read(); 2116 entry.owner.inst = r_read_copy_inst.read(); 2117 entry.count = r_read_count.read(); 2033 2118 } 2034 2119 2035 2120 #if DEBUG_MEMC_READ 2036 if(m_debug) 2037 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2038 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2039 << " / set = " << std::dec << set 2040 << " / way = " << way 2041 << " / owner_id = " << std::hex << entry.owner.srcid 2042 << " / owner_ins = " << std::dec << entry.owner.inst 2043 << " / count = " << entry.count 2044 << " / is_cnt = " << entry.is_cnt << std::endl; 2045 #endif 2046 2121 if(m_debug) 2122 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2123 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2124 << " / set = " << std::dec << set 2125 << " / way = " << way 2126 << " / owner_id = " << std::hex << entry.owner.srcid 2127 << " / owner_ins = " << std::dec << entry.owner.inst 2128 << " / count = " << entry.count 2129 << " / is_cnt = " << entry.is_cnt << std::endl; 2130 #endif 2047 2131 m_cache_directory.write(set, way, entry); 2048 2132 r_read_fsm = READ_RSP; 2049 } 2050 break; 2133 break; 2051 2134 } 2052 2135 /////////////////// … … 2080 2163 2081 2164 m_cache_data.read_line(way, set, r_read_data); 2082 2083 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true);2084 2165 2085 2166 // update the cache directory … … 2095 2176 { 2096 2177 entry.owner.srcid = r_read_copy.read(); 2097 #if L1_MULTI_CACHE2098 entry.owner.cache_id = 
r_read_copy_cache.read();2099 #endif2100 2178 entry.owner.inst = r_read_copy_inst.read(); 2101 2179 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap … … 2104 2182 { 2105 2183 entry.owner.srcid = 0; 2106 #if L1_MULTI_CACHE2107 entry.owner.cache_id = 0;2108 #endif2109 2184 entry.owner.inst = false; 2110 2185 entry.ptr = 0; … … 2172 2247 HeapEntry heap_entry; 2173 2248 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2174 #if L1_MULTI_CACHE2175 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read();2176 #endif2177 2249 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2178 2250 … … 2238 2310 HeapEntry last_entry; 2239 2311 last_entry.owner.srcid = 0; 2240 #if L1_MULTI_CACHE2241 last_entry.owner.cache_id = 0;2242 #endif2243 2312 last_entry.owner.inst = false; 2244 2313 … … 2266 2335 case READ_RSP: // request the TGT_RSP FSM to return data 2267 2336 { 2268 if(!r_read_to_tgt_rsp_req)2269 {2270 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i];2271 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()];2272 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read();2273 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read();2274 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read();2275 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read();2276 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read();2277 cmd_read_fifo_get = true;2278 r_read_to_tgt_rsp_req = true;2279 r_read_fsm = READ_IDLE;2337 if(!r_read_to_tgt_rsp_req) 2338 { 2339 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2340 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2341 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2342 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2343 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2344 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 2345 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2346 cmd_read_fifo_get = true; 2347 r_read_to_tgt_rsp_req = true; 2348 r_read_fsm = READ_IDLE; 2280 2349 2281 2350 #if DEBUG_MEMC_READ … … 2286 2355 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2287 2356 #endif 2288 }2289 break;2357 } 2358 break; 2290 2359 } 2291 2360 /////////////////// 2292 2361 case READ_TRT_LOCK: // read miss : check the Transaction Table 2293 2362 { 2294 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ)2295 {2296 size_t index = 0;2297 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read();2298 bool hit_read = m_trt.hit_read(m_nline[addr], index);2299 bool hit_write = m_trt.hit_write(m_nline[addr]);2300 bool wok = !m_trt.full(index);2301 2302 if(hit_read or !wok or hit_write) // missingline already requested or no space2303 {2304 if(!wok)m_cpt_trt_full++;2305 if(hit_read or hit_write) m_cpt_trt_rb++;2306 r_read_fsm = READ_IDLE;2307 }2308 else // missing line is requested to the XRAM2309 {2310 m_cpt_read_miss++;2311 r_read_trt_index = index;2312 r_read_fsm = READ_TRT_SET;2313 }2363 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2364 { 2365 size_t index = 0; 2366 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2367 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2368 bool hit_write = m_trt.hit_write(m_nline[addr]); 2369 bool wok = not m_trt.full(index); 2370 2371 if(hit_read or !wok or hit_write) // line already requested or no space 2372 { 2373 if(!wok) m_cpt_trt_full++; 2374 if(hit_read or hit_write) m_cpt_trt_rb++; 2375 r_read_fsm = READ_IDLE; 2376 } 2377 else // missing line is requested to the XRAM 
2378 { 2379 m_cpt_read_miss++; 2380 r_read_trt_index = index; 2381 r_read_fsm = READ_TRT_SET; 2382 } 2314 2383 2315 2384 #if DEBUG_MEMC_READ … … 2320 2389 << " / full = " << !wok << std::endl; 2321 2390 #endif 2322 } 2323 break; 2324 } 2325 2391 } 2392 break; 2393 } 2326 2394 ////////////////// 2327 2395 case READ_TRT_SET: // register get transaction in TRT 2328 2396 { 2329 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ)2330 {2331 m_trt.set(r_read_trt_index.read(),2332 true,2333 2334 2335 2336 2337 true,2338 2339 2340 2341 2342 r_read_ll_key.read());2397 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2398 { 2399 m_trt.set( r_read_trt_index.read(), 2400 true, // GET 2401 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2402 m_cmd_read_srcid_fifo.read(), 2403 m_cmd_read_trdid_fifo.read(), 2404 m_cmd_read_pktid_fifo.read(), 2405 true, // proc read 2406 m_cmd_read_length_fifo.read(), 2407 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2408 std::vector<be_t> (m_words,0), 2409 std::vector<data_t> (m_words,0), 2410 r_read_ll_key.read() ); 2343 2411 #if DEBUG_MEMC_READ 2344 2412 if(m_debug) 2345 std::cout << " <MEMC " << name() << " READ_TRT_SET> Write in Transaction Table:"2413 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2346 2414 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2347 2415 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2348 2416 #endif 2349 r_read_fsm = READ_TRT_REQ;2350 }2351 break;2417 r_read_fsm = READ_TRT_REQ; 2418 } 2419 break; 2352 2420 } 2353 2421 … … 2355 2423 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2356 2424 { 2357 if(not r_read_to_ixr_cmd_req) 2358 { 2359 cmd_read_fifo_get = true; 2360 r_read_to_ixr_cmd_req = true; 2361 r_read_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2362 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 2363 r_read_fsm = READ_IDLE; 2425 if(not r_read_to_ixr_cmd_req) 2426 { 2427 cmd_read_fifo_get = true; 2428 r_read_to_ixr_cmd_req = true; 2429 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2430 r_read_fsm = READ_IDLE; 2364 2431 2365 2432 #if DEBUG_MEMC_READ … … 2368 2435 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2369 2436 #endif 2370 }2371 break;2437 } 2438 break; 2372 2439 } 2373 2440 } // end switch read_fsm … … 2387 2454 // If the data is cached by other processors, a coherence transaction must 2388 2455 // be launched (sc requests always require a coherence transaction): 2389 // It is a multicast update if the line is not in counter mode , andthe processor2456 // It is a multicast update if the line is not in counter mode: the processor 2390 2457 // takes the lock protecting the Update Table (UPT) to register this transaction. 2391 // It is a broadcast invalidate if the line is in counter mode.2392 2458 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2393 2459 // a multi-update request to all owners of the line (but the writer), … … 2395 2461 // does not respond to the writing processor, as this response will be sent by 2396 2462 // the MULTI_ACK FSM when all update responses have been received. 2463 // It is a broadcast invalidate if the line is in counter mode: The line 2464 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2465 // after registration in TRT. 2397 2466 // 2398 2467 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2399 2468 // table (TRT). 
If a read transaction to the XRAM for this line already exists, 2400 2469 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2401 // the WRITE FSM register a new transaction in TRT, and sends a read linerequest2470 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2402 2471 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2403 2472 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2404 2473 ///////////////////////////////////////////////////////////////////////////////////// 2474 2475 //std::cout << std::endl << "write_fsm" << std::endl; 2405 2476 2406 2477 switch(r_write_fsm.read()) … … 2409 2480 case WRITE_IDLE: // copy first word of a write burst in local buffer 2410 2481 { 2411 if(m_cmd_write_addr_fifo.rok()) 2412 { 2413 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2414 m_cpt_sc++; 2415 else 2416 { 2417 m_cpt_write++; 2418 m_cpt_write_cells++; 2419 } 2420 2421 // consume a word in the FIFO & write it in the local buffer 2422 cmd_write_fifo_get = true; 2423 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2424 2425 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2426 r_write_word_index = index; 2427 r_write_word_count = 1; 2428 r_write_data[index] = m_cmd_write_data_fifo.read(); 2429 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2430 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2431 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2432 r_write_pending_sc = false; 2433 2434 // initialize the be field for all words 2435 for(size_t word=0 ; word<m_words ; word++) 2436 { 2437 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2438 else r_write_be[word] = 0x0; 2439 } 2440 2441 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2442 { 2443 r_write_fsm = WRITE_DIR_REQ; 2444 } 2445 else 2446 { 2447 r_write_fsm = WRITE_NEXT; 2448 } 2482 if(m_cmd_write_addr_fifo.rok()) 2483 { 2484 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2485 { 2486 m_cpt_sc++; 2487 } 2488 else 2489 { 2490 m_cpt_write++; 2491 m_cpt_write_cells++; 2492 } 2493 2494 // consume a word in the FIFO & write it in the local buffer 2495 cmd_write_fifo_get = true; 2496 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2497 2498 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2499 r_write_word_index = index; 2500 r_write_word_count = 1; 2501 r_write_data[index] = m_cmd_write_data_fifo.read(); 2502 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2503 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2504 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2505 r_write_pending_sc = false; 2506 2507 // initialize the be field for all words 2508 for(size_t word=0 ; word<m_words ; word++) 2509 { 2510 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2511 else r_write_be[word] = 0x0; 2512 } 2513 2514 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2515 { 2516 r_write_fsm = WRITE_DIR_REQ; 2517 } 2518 else 2519 { 2520 r_write_fsm = WRITE_NEXT; 2521 } 2449 2522 2450 2523 #if DEBUG_MEMC_WRITE … … 2455 2528 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 2456 2529 #endif 2457 } 2458 break; 2459 } 2460 2530 } 2531 break; 2532 } 2461 2533 //////////////// 2462 2534 case WRITE_NEXT: // copy next word of a write burst in local buffer 2463 2535 { 2464 if(m_cmd_write_addr_fifo.rok())2465 {2536 if(m_cmd_write_addr_fifo.rok()) 2537 { 2466 2538 2467 2539 #if DEBUG_MEMC_WRITE … … 2471 2543 << 
std::endl; 2472 2544 #endif 2473 m_cpt_write_cells++; 2474 2475 // check that the next word is in the same cache line 2476 if((m_nline[(addr_t)(r_write_address.read())] != 2477 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())])) 2478 { 2479 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl 2480 << "all words in a write burst must be in same cache line" << std::endl; 2481 2482 exit(0); 2483 } 2484 2485 // consume a word in the FIFO & write it in the local buffer 2486 cmd_write_fifo_get = true; 2487 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2488 2489 r_write_be[index] = m_cmd_write_be_fifo.read(); 2490 r_write_data[index] = m_cmd_write_data_fifo.read(); 2491 r_write_word_count = r_write_word_count.read() + 1; 2492 2493 if(m_cmd_write_eop_fifo.read()) 2494 { 2495 r_write_fsm = WRITE_DIR_REQ; 2496 } 2497 } 2498 break; 2499 } 2500 2501 //////////////////// 2502 case WRITE_DIR_REQ: 2503 { 2504 // Get the lock to the directory 2505 // and access the llsc_global_table 2506 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2507 { 2508 /////////////////////////////////////////////////////////////////////// 2509 // SC command treatment 2510 // We test the r_write_pending_sc register to know if we are returning 2511 // from the WAIT state. 2512 // In this case, the SC has already succeed and we cannot consume 2513 // another time from the FIFO. Also, we don't have to test another 2514 // time if the SC has succeed 2515 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2516 { 2517 if(not m_cmd_write_addr_fifo.rok()) break; 2518 2519 assert(m_cmd_write_eop_fifo.read() and 2520 "Error in VCI_MEM_CACHE : " 2521 "invalid packet format for SC command"); 2522 2523 size_t index = r_write_word_index.read(); 2524 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2545 m_cpt_write_cells++; 2546 2547 // check that the next word is in the same cache line 2548 assert( (m_nline[(addr_t)(r_write_address.read())] == 2549 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2550 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2551 2552 // consume a word in the FIFO & write it in the local buffer 2553 cmd_write_fifo_get = true; 2554 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2555 2556 r_write_be[index] = m_cmd_write_be_fifo.read(); 2557 r_write_data[index] = m_cmd_write_data_fifo.read(); 2558 r_write_word_count = r_write_word_count.read() + 1; 2559 2560 if(m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2561 } 2562 break; 2563 } 2564 /////////////////// 2565 case WRITE_DIR_REQ: // Get the lock to the directory 2566 // and access the llsc_global_table 2567 { 2568 if( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2569 { 2570 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2571 { 2572 // We enter here if it is a new SC command 2573 // If r_write_pending_sc is set the SC is not new and has already been tested 2574 2575 if(not m_cmd_write_addr_fifo.rok()) break; 2576 2577 assert( m_cmd_write_eop_fifo.read() and 2578 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2579 2580 size_t index = r_write_word_index.read(); 2581 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2525 2582 r_write_data[index].read()); 2526 2583 2527 // consume a word in the FIFO & write it in the local buffer 2528 cmd_write_fifo_get = true; 2529 r_write_data[index] = m_cmd_write_data_fifo.read(); 2530 r_write_sc_fail = not sc_success; 2531 
r_write_pending_sc = true; 2532 2533 if(not sc_success) r_write_fsm = WRITE_RSP; 2534 else r_write_fsm = WRITE_DIR_LOCK; 2535 2536 break; 2537 } 2538 2539 /////////////////////////////////////////////////////////////////////// 2540 // WRITE command treatment or SC command returning from the WAIT state 2541 // In the second case, we must access the LL/SC global table to 2542 // erase any possible new reservation when we release the lock on the 2543 // directory 2544 m_llsc_table.sw(m_nline[(addr_t)r_write_address.read()],r_write_word_index.read(),r_write_word_index.read()+r_write_word_count.read()); 2545 2546 r_write_fsm = WRITE_DIR_LOCK; 2547 } 2584 // consume a word in the FIFO & write it in the local buffer 2585 cmd_write_fifo_get = true; 2586 r_write_data[index] = m_cmd_write_data_fifo.read(); 2587 r_write_sc_fail = not sc_success; 2588 r_write_pending_sc = true; 2589 2590 if(not sc_success) r_write_fsm = WRITE_RSP; 2591 else r_write_fsm = WRITE_DIR_LOCK; 2592 } 2593 else 2594 { 2595 // We enter here if it is a SW command or an already tested SC command 2596 2597 m_llsc_table.sw( m_nline[(addr_t)r_write_address.read()], 2598 r_write_word_index.read(), 2599 r_write_word_index.read() + r_write_word_count.read() ); 2600 2601 r_write_fsm = WRITE_DIR_LOCK; 2602 } 2548 2603 2549 2604 #if DEBUG_MEMC_WRITE … … 2552 2607 << std::endl; 2553 2608 #endif 2554 2555 break; 2556 } 2557 2609 } 2610 break; 2611 } 2558 2612 //////////////////// 2559 2613 case WRITE_DIR_LOCK: // access directory to check hit/miss 2560 2614 { 2561 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2562 { 2615 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2616 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 2617 2563 2618 size_t way = 0; 2564 2619 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); … … 2566 2621 if(entry.valid) // hit 2567 2622 { 2568 // copy directory entry in local buffer in case of hit 2569 r_write_is_cnt = entry.is_cnt; 2570 r_write_lock = entry.lock; 2571 r_write_tag = entry.tag; 2572 r_write_copy = entry.owner.srcid; 2573 #if L1_MULTI_CACHE 2574 r_write_copy_cache = entry.owner.cache_id; 2575 #endif 2576 r_write_copy_inst = entry.owner.inst; 2577 r_write_count = entry.count; 2578 r_write_ptr = entry.ptr; 2579 r_write_way = way; 2580 2581 if(entry.is_cnt and entry.count) 2582 { 2583 r_write_fsm = WRITE_DIR_READ; 2584 } 2585 else 2586 { 2587 r_write_fsm = WRITE_DIR_HIT; 2588 } 2623 // copy directory entry in local buffer in case of hit 2624 r_write_is_cnt = entry.is_cnt; 2625 r_write_lock = entry.lock; 2626 r_write_tag = entry.tag; 2627 r_write_copy = entry.owner.srcid; 2628 r_write_copy_inst = entry.owner.inst; 2629 r_write_count = entry.count; 2630 r_write_ptr = entry.ptr; 2631 r_write_way = way; 2632 2633 if(entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ; 2634 else r_write_fsm = WRITE_DIR_HIT; 2589 2635 } 2590 2636 else // miss 2591 2637 { 2592 r_write_fsm = WRITE_MISS_TRT_LOCK;2638 r_write_fsm = WRITE_MISS_TRT_LOCK; 2593 2639 } 2594 2640 … … 2607 2653 } 2608 2654 #endif 2609 } 2610 else 2611 { 2612 std::cout << "VCI_MEM_CACHE ERROR " << name() 2613 << " WRITE_DIR_LOCK state" << std::endl 2614 << "bad DIR allocation" << std::endl; 2615 2616 exit(0); 2617 } 2618 break; 2619 } 2620 //////////////////// 2621 case WRITE_DIR_READ: // read the cache and complete the buffer when be!=0xF 2622 { 2623 // update local buffer 2624 size_t set = m_y[(addr_t)(r_write_address.read())]; 2625 size_t way = r_write_way.read(); 2626 for(size_t word=0 ; word<m_words ; 
word++) 2627 { 2628 data_t mask = 0; 2629 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 2630 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 2631 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 2632 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 2633 2634 // complete only if mask is not null (for energy consumption) 2635 r_write_data[word] = (r_write_data[word].read() & mask) | 2636 (m_cache_data.read(way, set, word) & ~mask); 2637 2638 } // end for 2639 2640 // test if a coherence broadcast is required 2641 r_write_fsm = WRITE_BC_TRT_LOCK; 2642 2643 #if DEBUG_MEMC_WRITE 2644 if(m_debug) 2645 std::cout << " <MEMC " << name() << " WRITE_DIR_READ>" 2646 << " Read the cache to complete local buffer" << std::endl; 2647 #endif 2648 break; 2649 } 2650 2655 break; 2656 } 2651 2657 /////////////////// 2652 case WRITE_DIR_HIT: 2653 { 2654 // update the cache directory 2655 // update directory with Dirty bit 2656 DirectoryEntry entry; 2657 entry.valid = true; 2658 entry.dirty = true; 2659 entry.tag = r_write_tag.read(); 2660 entry.is_cnt = r_write_is_cnt.read(); 2661 entry.lock = r_write_lock.read(); 2662 entry.owner.srcid = r_write_copy.read(); 2663 #if L1_MULTI_CACHE 2664 entry.owner.cache_id = r_write_copy_cache.read(); 2665 #endif 2666 entry.owner.inst = r_write_copy_inst.read(); 2667 entry.count = r_write_count.read(); 2668 entry.ptr = r_write_ptr.read(); 2669 2670 size_t set = m_y[(addr_t)(r_write_address.read())]; 2671 size_t way = r_write_way.read(); 2672 2673 // update directory 2674 m_cache_directory.write(set, way, entry); 2675 2676 // owner is true when the the first registered copy is the writer itself 2677 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 2678 #if L1_MULTI_CACHE 2679 and(r_write_copy_cache.read() ==r_write_pktid.read()) 2680 #endif 2681 ) and not r_write_copy_inst.read()); 2682 2683 // no_update is true when there is no need for coherence transaction 2684 // (tests for sc requests) 2685 bool no_update = ( (r_write_count.read() == 0) or 2658 case WRITE_DIR_HIT: // update the cache directory with Dirty bit 2659 // and update data cache 2660 { 2661 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2662 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 2663 2664 DirectoryEntry entry; 2665 entry.valid = true; 2666 entry.dirty = true; 2667 entry.tag = r_write_tag.read(); 2668 entry.is_cnt = r_write_is_cnt.read(); 2669 entry.lock = r_write_lock.read(); 2670 entry.owner.srcid = r_write_copy.read(); 2671 entry.owner.inst = r_write_copy_inst.read(); 2672 entry.count = r_write_count.read(); 2673 entry.ptr = r_write_ptr.read(); 2674 2675 size_t set = m_y[(addr_t)(r_write_address.read())]; 2676 size_t way = r_write_way.read(); 2677 2678 // update directory 2679 m_cache_directory.write(set, way, entry); 2680 2681 // owner is true when the the first registered copy is the writer itself 2682 bool owner = ( (r_write_copy.read() == r_write_srcid.read()) 2683 and not r_write_copy_inst.read() ); 2684 2685 // no_update is true when there is no need for coherence transaction 2686 bool no_update = ( (r_write_count.read() == 0) or 2686 2687 (owner and (r_write_count.read() ==1) and 2687 2688 (r_write_pktid.read() != TYPE_SC))); 2688 2689 2689 // write data in the cache if no coherence transaction 2690 if(no_update) 2691 { 2692 for(size_t word=0 ; word<m_words ; word++) 2693 { 2694 m_cache_data.write(way, set, word, r_write_data[word].read(), r_write_be[word].read()); 2695 2696 if(m_monitor_ok) 2697 { 2698 addr_t 
address = (r_write_address.read() & ~(addr_t) 0x3F) | word<<2; 2699 check_monitor( address, r_write_data[word].read(), false); 2700 } 2701 } 2702 } 2703 2704 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2705 { 2706 r_write_count = r_write_count.read() - 1; 2707 } 2708 2709 if(no_update) 2710 // Write transaction completed 2711 { 2712 r_write_fsm = WRITE_RSP; 2713 } 2714 else 2715 // coherence update required 2716 { 2717 if(!r_write_to_cc_send_multi_req.read() and 2718 !r_write_to_cc_send_brdcast_req.read()) 2719 { 2720 r_write_fsm = WRITE_UPT_LOCK; 2721 } 2722 else 2723 { 2724 r_write_fsm = WRITE_WAIT; 2725 } 2726 } 2690 // write data in the cache if no coherence transaction 2691 if(no_update) 2692 { 2693 for(size_t word=0 ; word<m_words ; word++) 2694 { 2695 m_cache_data.write( way, 2696 set, 2697 word, 2698 r_write_data[word].read(), 2699 r_write_be[word].read()); 2700 } 2701 } 2702 2703 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2704 { 2705 r_write_count = r_write_count.read() - 1; 2706 } 2707 2708 if(no_update) // Write transaction completed 2709 { 2710 r_write_fsm = WRITE_RSP; 2711 } 2712 else // coherence update required 2713 { 2714 if(!r_write_to_cc_send_multi_req.read() and 2715 !r_write_to_cc_send_brdcast_req.read()) 2716 { 2717 r_write_fsm = WRITE_UPT_LOCK; 2718 } 2719 else 2720 { 2721 r_write_fsm = WRITE_WAIT; 2722 } 2723 } 2727 2724 2728 2725 #if DEBUG_MEMC_WRITE 2729 2726 if(m_debug) 2730 2727 { 2731 if(no_update) 2732 { 2733 std::cout << " <MEMC " << name() 2734 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 2735 << std::endl; 2736 } 2737 else 2738 { 2739 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2740 << " is_cnt = " << r_write_is_cnt.read() 2741 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2742 if(owner) std::cout << " ... but the first copy is the writer" << std::endl; 2743 } 2728 if(no_update) 2729 { 2730 std::cout << " <MEMC " << name() 2731 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl; 2744 2732 } 2745 #endif 2746 break; 2733 else 2734 { 2735 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2736 << " is_cnt = " << r_write_is_cnt.read() 2737 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2738 if(owner) std::cout << " ... 
but the first copy is the writer" << std::endl; 2739 } 2740 } 2741 #endif 2742 break; 2747 2743 } 2748 2744 //////////////////// 2749 2745 case WRITE_UPT_LOCK: // Try to register the update request in UPT 2750 2746 { 2751 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2752 { 2753 bool wok = false; 2754 size_t index = 0; 2755 size_t srcid = r_write_srcid.read(); 2756 size_t trdid = r_write_trdid.read(); 2757 size_t pktid = r_write_pktid.read(); 2758 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2759 size_t nb_copies = r_write_count.read(); 2760 size_t set = m_y[(addr_t)(r_write_address.read())]; 2761 size_t way = r_write_way.read(); 2762 2763 wok = m_upt.set(true, // it's an update transaction 2764 false, // it's not a broadcast 2765 true, // response required 2766 false, // no acknowledge required 2767 srcid, 2768 trdid, 2769 pktid, 2770 nline, 2771 nb_copies, 2772 index); 2773 if(wok) // write data in cache 2774 { 2775 for(size_t word=0 ; word<m_words ; word++) 2776 { 2777 m_cache_data.write(way, 2778 set, 2779 word, 2780 r_write_data[word].read(), 2781 r_write_be[word].read()); 2782 2783 if(m_monitor_ok) 2747 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2748 { 2749 bool wok = false; 2750 size_t index = 0; 2751 size_t srcid = r_write_srcid.read(); 2752 size_t trdid = r_write_trdid.read(); 2753 size_t pktid = r_write_pktid.read(); 2754 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2755 size_t nb_copies = r_write_count.read(); 2756 size_t set = m_y[(addr_t)(r_write_address.read())]; 2757 size_t way = r_write_way.read(); 2758 2759 wok = m_upt.set( true, // it's an update transaction 2760 false, // it's not a broadcast 2761 true, // response required 2762 false, // no acknowledge required 2763 srcid, 2764 trdid, 2765 pktid, 2766 nline, 2767 nb_copies, 2768 index); 2769 2770 if( wok ) // write data in cache 2784 2771 { 2785 addr_t address = (r_write_address.read() & ~(addr_t) 0x3F) | word<<2; 2786 check_monitor( address, r_write_data[word].read(), false); 2772 for(size_t word=0 ; word<m_words ; word++) 2773 { 2774 m_cache_data.write( way, 2775 set, 2776 word, 2777 r_write_data[word].read(), 2778 r_write_be[word].read()); 2779 } 2787 2780 } 2788 }2789 }2790 2781 2791 2782 #if DEBUG_MEMC_WRITE 2792 if(m_debug )2783 if(m_debug and wok) 2793 2784 { 2794 if(wok) 2795 { 2796 std::cout << " <MEMC " << name() 2797 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 2798 << " nb_copies = " << r_write_count.read() << std::endl; 2799 } 2785 std::cout << " <MEMC " << name() 2786 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 2787 << " nb_copies = " << r_write_count.read() << std::endl; 2800 2788 } 2801 2789 #endif 2802 r_write_upt_index = index;2803 //releases the lock protecting UPT and the DIR if no entry...2804 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK;2805 else r_write_fsm = WRITE_WAIT;2806 }2807 break;2790 r_write_upt_index = index; 2791 // releases the lock protecting UPT and the DIR if no entry... 
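As a reading aid for the WRITE_DIR_HIT transition above, the test deciding whether a write hit needs a coherence transaction can be restated as a standalone predicate. This is only a transcription of the FSM condition with plain parameters in place of the r_write_* registers; the TYPE_SC comparison is abstracted as a boolean.

    #include <cstddef>

    // Transcription of the WRITE_DIR_HIT decision: a write hit completes locally
    // (no multicast update, no broadcast inval) when nobody else holds a copy.
    static bool write_hit_needs_no_coherence(size_t copy_srcid,   // first registered copy
                                             bool   copy_inst,    // that copy is an ICache
                                             size_t count,        // number of L1 copies
                                             size_t writer_srcid, // srcid of the write
                                             bool   is_sc)        // SC always updates
    {
        // the writer itself owns the first registered copy
        bool owner = (copy_srcid == writer_srcid) and not copy_inst;

        return (count == 0) or (owner and (count == 1) and not is_sc);
    }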
2792 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 2793 else r_write_fsm = WRITE_WAIT; 2794 } 2795 break; 2808 2796 } 2809 2797 … … 2847 2835 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 2848 2836 2849 if((r_write_copy.read() != r_write_srcid.read()) or(r_write_pktid.read() == TYPE_SC) or 2850 #if L1_MULTI_CACHE 2851 (r_write_copy_cache.read() != r_write_pktid.read()) or 2852 #endif 2853 r_write_copy_inst.read()) 2837 if( (r_write_copy.read() != r_write_srcid.read()) or 2838 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 2854 2839 { 2855 2840 // put the first srcid in the fifo … … 2857 2842 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 2858 2843 write_to_cc_send_fifo_srcid = r_write_copy.read(); 2859 #if L1_MULTI_CACHE2860 write_to_cc_send_fifo_cache_id= r_write_copy_cache.read();2861 #endif2862 2844 if(r_write_count.read() == 1) 2863 2845 { … … 2910 2892 bool dec_upt_counter; 2911 2893 2912 if(((entry.owner.srcid != r_write_srcid.read()) or (r_write_pktid.read() == TYPE_SC)) or 2913 #if L1_MULTI_CACHE 2914 (entry.owner.cache_id != r_write_pktid.read()) or 2915 #endif 2916 entry.owner.inst) // put the next srcid in the fifo 2894 // put the next srcid in the fifo 2895 if( (entry.owner.srcid != r_write_srcid.read()) or 2896 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 2917 2897 { 2918 2898 dec_upt_counter = false; … … 2920 2900 write_to_cc_send_fifo_inst = entry.owner.inst; 2921 2901 write_to_cc_send_fifo_srcid = entry.owner.srcid; 2922 #if L1_MULTI_CACHE2923 write_to_cc_send_fifo_cache_id = entry.owner.cache_id;2924 #endif2925 2902 2926 2903 #if DEBUG_MEMC_WRITE … … 2992 2969 2993 2970 /////////////// 2994 case WRITE_RSP: 2995 { 2996 // Post a request to TGT_RSP FSM to acknowledge the write 2997 // In order to increase the Write requests throughput, 2998 // we don't wait to return in the IDLE state to consume 2999 // a new request in the write FIFO 3000 2971 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 2972 // In order to increase the Write requests throughput, 2973 // we don't wait to return in the IDLE state to consume 2974 // a new request in the write FIFO 2975 { 3001 2976 if(!r_write_to_tgt_rsp_req.read()) 3002 2977 { … … 3086 3061 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3087 3062 bool hit_write = m_trt.hit_write(m_nline[addr]); 3088 bool wok = !m_trt.full(wok_index);3063 bool wok = not m_trt.full(wok_index); 3089 3064 3090 3065 if(hit_read) // register the modified data in TRT … … 3170 3145 data_vector.push_back(r_write_data[i]); 3171 3146 } 3172 m_trt.write_data_mask( r_write_trt_index.read(),3173 3174 data_vector);3147 m_trt.write_data_mask( r_write_trt_index.read(), 3148 be_vector, 3149 data_vector ); 3175 3150 r_write_fsm = WRITE_RSP; 3176 3151 … … 3182 3157 break; 3183 3158 } 3184 3185 3159 ///////////////////////// 3186 3160 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3187 3161 { 3188 if( !r_write_to_ixr_cmd_req)3162 if( not r_write_to_ixr_cmd_req.read() ) 3189 3163 { 3190 3164 r_write_to_ixr_cmd_req = true; 3191 r_write_to_ixr_cmd_write = false; 3192 r_write_to_ixr_cmd_nline = m_nline[(addr_t)(r_write_address.read())]; 3193 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 3165 r_write_to_ixr_cmd_put = false; 3166 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3194 3167 r_write_fsm = WRITE_RSP; 3195 3168 … … 3201 3174 break; 3202 3175 } 3203 3204 3176 /////////////////////// 3205 case WRITE_BC_TRT_LOCK: // Check TRT not full 3206 { 3207 
if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3208 { 3209 size_t wok_index = 0; 3210 bool wok = !m_trt.full(wok_index); 3211 if(wok) // set a new entry in TRT 3212 { 3213 r_write_trt_index = wok_index; 3214 r_write_fsm = WRITE_BC_IVT_LOCK; 3215 } 3216 else // wait an empty entry in TRT 3217 { 3218 r_write_fsm = WRITE_WAIT; 3219 } 3177 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3178 // the cache line must be erased in mem-cache, and written 3179 // into XRAM. we read the cache and complete the buffer 3180 { 3181 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3182 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3183 3184 // update local buffer 3185 size_t set = m_y[(addr_t)(r_write_address.read())]; 3186 size_t way = r_write_way.read(); 3187 for(size_t word=0 ; word<m_words ; word++) 3188 { 3189 data_t mask = 0; 3190 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3191 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3192 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3193 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3194 3195 // complete only if mask is not null (for energy consumption) 3196 r_write_data[word] = (r_write_data[word].read() & mask) | 3197 (m_cache_data.read(way, set, word) & ~mask); 3198 } // end for 3199 3200 r_write_fsm = WRITE_BC_TRT_LOCK; 3201 3202 #if DEBUG_MEMC_WRITE 3203 if(m_debug) 3204 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3205 << " Read the cache to complete local buffer" << std::endl; 3206 #endif 3207 break; 3208 } 3209 /////////////////////// 3210 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3211 { 3212 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3213 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3214 3215 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3216 { 3217 size_t wok_index = 0; 3218 bool wok = not m_trt.full(wok_index); 3219 if( wok ) 3220 { 3221 r_write_trt_index = wok_index; 3222 r_write_fsm = WRITE_BC_IVT_LOCK; 3223 } 3224 else // wait an empty slot in TRT 3225 { 3226 r_write_fsm = WRITE_WAIT; 3227 } 3220 3228 3221 3229 #if DEBUG_MEMC_WRITE … … 3224 3232 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3225 3233 #endif 3226 } 3227 break; 3228 } 3229 3234 } 3235 break; 3236 } 3230 3237 ////////////////////// 3231 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 3232 { 3233 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3234 { 3235 bool wok = false; 3236 size_t index = 0; 3237 size_t srcid = r_write_srcid.read(); 3238 size_t trdid = r_write_trdid.read(); 3239 size_t pktid = r_write_pktid.read(); 3240 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3241 size_t nb_copies = r_write_count.read(); 3242 3243 wok = m_ivt.set(false, // it's an inval transaction 3244 true, // it's a broadcast 3245 true, // response required 3246 false, // no acknowledge required 3247 srcid, 3248 trdid, 3249 pktid, 3250 nline, 3251 nb_copies, 3252 index); 3253 3238 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3239 { 3240 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3241 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3242 3243 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3244 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3245 3246 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3247 { 3248 bool wok = false; 3249 size_t index = 0; 3250 size_t srcid = r_write_srcid.read(); 3251 size_t trdid = 
r_write_trdid.read(); 3252 size_t pktid = r_write_pktid.read(); 3253 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3254 size_t nb_copies = r_write_count.read(); 3255 3256 wok = m_ivt.set(false, // it's an inval transaction 3257 true, // it's a broadcast 3258 true, // response required 3259 false, // no acknowledge required 3260 srcid, 3261 trdid, 3262 pktid, 3263 nline, 3264 nb_copies, 3265 index); 3254 3266 #if DEBUG_MEMC_WRITE 3255 3267 if( m_debug and wok ) … … 3257 3269 << " / nb_copies = " << r_write_count.read() << std::endl; 3258 3270 #endif 3259 r_write_upt_index = index; 3260 3261 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL; 3262 else r_write_fsm = WRITE_WAIT; 3263 } 3264 break; 3265 } 3266 3271 r_write_upt_index = index; 3272 3273 if( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3274 else r_write_fsm = WRITE_WAIT; 3275 } 3276 break; 3277 } 3267 3278 //////////////////////// 3268 case WRITE_BC_DIR_INVAL: 3269 { 3270 // Register a put transaction to XRAM in TRT 3271 // and invalidate the line in directory 3272 if((r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) or 3273 (r_alloc_ivt_fsm.read() != ALLOC_IVT_WRITE) or 3274 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE)) 3275 { 3276 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; 3277 std::cout << "bad TRT, DIR, or IVT allocation" << std::endl; 3278 exit(0); 3279 } 3280 3281 // register a write request to XRAM in TRT 3282 m_trt.set(r_write_trt_index.read(), 3283 false, // write request to XRAM 3284 m_nline[(addr_t)(r_write_address.read())], 3285 0, 3286 0, 3287 0, 3288 false, // not a processor read 3289 0, // not a single word 3290 0, // word index 3291 std::vector<be_t> (m_words,0), 3292 std::vector<data_t> (m_words,0)); 3293 3294 // invalidate directory entry 3295 DirectoryEntry entry; 3296 entry.valid = false; 3297 entry.dirty = false; 3298 entry.tag = 0; 3299 entry.is_cnt = false; 3300 entry.lock = false; 3301 entry.owner.srcid = 0; 3302 #if L1_MULTI_CACHE 3303 entry.owner.cache_id= 0; 3304 #endif 3305 entry.owner.inst = false; 3306 entry.ptr = 0; 3307 entry.count = 0; 3308 size_t set = m_y[(addr_t)(r_write_address.read())]; 3309 size_t way = r_write_way.read(); 3310 3311 m_cache_directory.write(set, way, entry); 3279 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3280 // and invalidate the line in directory 3281 { 3282 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3283 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3284 3285 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3286 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3287 3288 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3289 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3290 3291 // register PUT request in TRT 3292 std::vector<data_t> data_vector; 3293 data_vector.clear(); 3294 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3295 m_trt.set( r_write_trt_index.read(), 3296 false, // PUT request 3297 m_nline[(addr_t)(r_write_address.read())], 3298 0, // unused 3299 0, // unused 3300 0, // unused 3301 false, // not a processor read 3302 0, // unused 3303 0, // unused 3304 std::vector<be_t> (m_words,0), 3305 data_vector ); 3306 3307 // invalidate directory entry 3308 DirectoryEntry entry; 3309 entry.valid = false; 3310 entry.dirty = false; 3311 entry.tag = 0; 3312 entry.is_cnt = false; 3313 entry.lock = false; 3314 entry.owner.srcid = 0; 3315 entry.owner.inst = false; 3316 entry.ptr = 0; 3317 entry.count = 0; 3318 
size_t set = m_y[(addr_t)(r_write_address.read())]; 3319 size_t way = r_write_way.read(); 3320 3321 m_cache_directory.write(set, way, entry); 3312 3322 3313 3323 #if DEBUG_MEMC_WRITE 3314 3324 if(m_debug) 3315 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval idate the directory entry: @ =" 3316 << r_write_address.read() << " / register the put transaction in TRT:"<< std::endl; 3317 #endif 3318 r_write_fsm = WRITE_BC_CC_SEND; 3319 break; 3325 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3326 << " address = " << r_write_address.read() << std::endl; 3327 #endif 3328 r_write_fsm = WRITE_BC_CC_SEND; 3329 break; 3320 3330 } 3321 3331 … … 3323 3333 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3324 3334 { 3325 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3326 { 3327 r_write_to_cc_send_multi_req = false; 3328 r_write_to_cc_send_brdcast_req = true; 3329 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3330 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3331 r_write_to_cc_send_index = 0; 3332 r_write_to_cc_send_count = 0; 3333 3334 for(size_t i=0; i<m_words ; i++) 3335 { 3336 r_write_to_cc_send_be[i]=0; 3337 r_write_to_cc_send_data[i] = 0; 3338 } 3339 r_write_fsm = WRITE_BC_XRAM_REQ; 3335 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3336 { 3337 r_write_to_cc_send_multi_req = false; 3338 r_write_to_cc_send_brdcast_req = true; 3339 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3340 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3341 r_write_to_cc_send_index = 0; 3342 r_write_to_cc_send_count = 0; 3343 3344 for(size_t i=0; i<m_words ; i++) // what is the purpose of this for loop?
(AG) 3345 { 3346 r_write_to_cc_send_be[i]=0; 3347 r_write_to_cc_send_data[i] = 0; 3348 } 3349 r_write_fsm = WRITE_BC_XRAM_REQ; 3340 3350 3341 3351 #if DEBUG_MEMC_WRITE … … 3344 3354 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3345 3355 #endif 3346 } 3347 break; 3356 } 3357 break; 3348 3358 } 3349 3359 3350 3360 /////////////////////// 3351 3361 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 3352 { 3353 if(!r_write_to_ixr_cmd_req) 3354 { 3355 r_write_to_ixr_cmd_req = true; 3356 r_write_to_ixr_cmd_write = true; 3357 r_write_to_ixr_cmd_nline = m_nline[(addr_t)(r_write_address.read())]; 3358 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 3359 3360 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 3361 3362 r_write_fsm = WRITE_IDLE; 3361 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3362 { 3363 if( not r_write_to_ixr_cmd_req.read() ) 3364 { 3365 r_write_to_ixr_cmd_req = true; 3366 r_write_to_ixr_cmd_put = true; 3367 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3368 r_write_fsm = WRITE_IDLE; 3363 3369 3364 3370 #if DEBUG_MEMC_WRITE … … 3367 3373 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3368 3374 #endif 3369 }3370 break;3375 } 3376 break; 3371 3377 } 3372 3378 } // end switch r_write_fsm … … 3376 3382 /////////////////////////////////////////////////////////////////////// 3377 3383 // The IXR_CMD fsm controls the command packets to the XRAM : 3378 // It handles requests from the READ, WRITE, CAS, XRAM_RSP FSMs 3379 // with a round-robin priority. 3384 // It handles requests from 5 FSMs with a round-robin priority: 3385 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3380 3386 // 3381 // - It sends a single flit VCI read request to the XRAM in case of MISS 3382 // posted by the READ, WRITE or CAS FSMs : the TRDID field contains 3383 // the Transaction Tab index. 3384 // The VCI response is a multi-flit packet : the N cells contain 3385 // the N data words. 3387 // - It sends a single flit VCI read to the XRAM in case of 3388 // GET request posted by the READ, WRITE or CAS FSMs. 3389 // - It sends a multi-flit VCI write in case of PUT request posted by 3390 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 3386 3391 // 3387 // - It sends a multi-flit VCI write when the XRAM_RSP FSM, WRITE FSM 3388 // or CAS FSM request to save a dirty line to the XRAM. 3389 // The VCI response is a single flit packet. 3392 // For each client, there are three steps: 3393 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3394 // - IXR_CMD_*_TRT : access to TRT for address and data 3395 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3396 // 3397 // The address and data to be written (for a PUT) are stored in TRT. 3398 // The trdid field always contains the TRT entry index.
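The five *_IDLE states below implement the round-robin allocation announced in this comment: each client first tests the four other request flags in a fixed circular order, and tests its own flag last. A minimal stand-alone sketch of that selection policy, assuming the five booleans map to the r_read / r_write / r_cas / r_xram_rsp / r_config_to_ixr_cmd_req registers (the enum and function names below are illustrative, not part of the component):

    #include <array>

    // Illustrative model of the IXR_CMD round-robin allocation: "last" is the
    // client that owned the FSM before it returned to its IDLE state, req[]
    // holds the pending-request flags in the fixed order
    // READ, WRITE, CAS, XRAM_RSP, CONFIG.
    enum IxrClient { IXR_READ = 0, IXR_WRITE, IXR_CAS, IXR_XRAM, IXR_CONFIG, IXR_NONE };

    IxrClient next_ixr_client(IxrClient last, const std::array<bool, 5> &req)
    {
        for (unsigned i = 1; i <= 5; i++)               // previous owner is tested last
        {
            unsigned candidate = (unsigned(last) + i) % 5;
            if (req[candidate]) return IxrClient(candidate);
        }
        return IXR_NONE;                                // nothing pending: stay in IDLE
    }

Starting the scan just after the previous owner is what prevents a continuously requesting client from starving the four others.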
3390 3399 //////////////////////////////////////////////////////////////////////// 3400 3401 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3391 3402 3392 3403 switch(r_ixr_cmd_fsm.read()) 3393 3404 { 3394 /////////////////////// /3405 /////////////////////// 3395 3406 case IXR_CMD_READ_IDLE: 3396 3407 { 3397 if (r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3398 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3399 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3400 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3408 if (r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3409 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3410 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3411 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3412 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3401 3413 break; 3402 3414 } … … 3404 3416 case IXR_CMD_WRITE_IDLE: 3405 3417 { 3406 if (r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3407 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3408 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3409 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3418 if (r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3419 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3420 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3421 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3422 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3410 3423 break; 3411 3424 } 3425 ////////////////////// 3426 case IXR_CMD_CAS_IDLE: 3427 { 3428 if (r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3429 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3430 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3431 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3432 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3433 break; 3434 } 3435 /////////////////////// 3436 case IXR_CMD_XRAM_IDLE: 3437 { 3438 if (r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3439 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3440 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3441 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3442 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3443 break; 3444 } 3445 ///////////////////////// 3446 case IXR_CMD_CONFIG_IDLE: 3447 { 3448 if (r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3449 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3450 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3451 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3452 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3453 break; 3454 } 3455 3456 ////////////////////// 3457 case IXR_CMD_READ_TRT: // access TRT for a GET 3458 { 3459 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3460 { 3461 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 3462 r_ixr_cmd_address = entry.nline * (m_words<<2); 3463 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3464 r_ixr_cmd_get = true; 3465 r_ixr_cmd_word = 0; 3466 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3467 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3468 3469 #if DEBUG_MEMC_IXR_CMD 3470 if(m_debug) 3471 std::cout 
<< " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 3472 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3473 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3474 #endif 3475 } 3476 break; 3477 } 3478 /////////////////////// 3479 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3480 { 3481 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3482 { 3483 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 3484 r_ixr_cmd_address = entry.nline * (m_words<<2); 3485 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3486 r_ixr_cmd_get = entry.xram_read; 3487 r_ixr_cmd_word = 0; 3488 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3489 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3490 3491 #if DEBUG_MEMC_IXR_CMD 3492 if(m_debug) 3493 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3494 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3495 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3496 #endif 3497 } 3498 break; 3499 } 3500 ///////////////////// 3501 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3502 { 3503 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3504 { 3505 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 3506 r_ixr_cmd_address = entry.nline * (m_words<<2); 3507 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3508 r_ixr_cmd_get = entry.xram_read; 3509 r_ixr_cmd_word = 0; 3510 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3511 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3512 3513 #if DEBUG_MEMC_IXR_CMD 3514 if(m_debug) 3515 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3516 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3517 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3518 #endif 3519 } 3520 break; 3521 } 3522 ////////////////////// 3523 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3524 { 3525 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3526 { 3527 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 3528 r_ixr_cmd_address = entry.nline * (m_words<<2); 3529 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3530 r_ixr_cmd_get = false; 3531 r_ixr_cmd_word = 0; 3532 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3533 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3534 3535 #if DEBUG_MEMC_IXR_CMD 3536 if(m_debug) 3537 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3538 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3539 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3540 #endif 3541 } 3542 break; 3543 } 3412 3544 //////////////////////// 3413 case IXR_CMD_CAS_IDLE: 3414 { 3415 if (r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3416 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3417 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3418 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3419 break; 3545 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3546 { 3547 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3548 { 3549 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 3550 r_ixr_cmd_address = entry.nline * (m_words<<2); 3551 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3552 r_ixr_cmd_get = false; 3553 r_ixr_cmd_word = 0; 3554 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3555 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = 
entry.wdata[i]; 3556 3557 #if DEBUG_MEMC_IXR_CMD 3558 if(m_debug) 3559 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3560 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3561 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3562 #endif 3563 } 3564 break; 3565 } 3566 3567 /////////////////////// 3568 case IXR_CMD_READ_SEND: // send a get from READ FSM 3569 { 3570 if(p_vci_ixr.cmdack) 3571 { 3572 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3573 r_read_to_ixr_cmd_req = false; 3574 3575 #if DEBUG_MEMC_IXR_CMD 3576 if(m_debug) 3577 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3578 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3579 #endif 3580 } 3581 break; 3420 3582 } 3421 3583 //////////////////////// 3422 case IXR_CMD_ XRAM_IDLE:3423 { 3424 if (r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ;3425 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE;3426 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS;3427 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM;3428 break;3429 }3430 ////////////////// // send a get from READ FSM3431 case IXR_CMD_READ:3432 {3433 if(p_vci_ixr.cmdack)3434 {3435 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE;3436 r_read_to_ixr_cmd_req = false;3584 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3585 { 3586 if(p_vci_ixr.cmdack) 3587 { 3588 if(r_write_to_ixr_cmd_put.read()) // PUT 3589 { 3590 if(r_ixr_cmd_word.read() == (m_words - 2)) 3591 { 3592 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3593 r_write_to_ixr_cmd_req = false; 3594 } 3595 else 3596 { 3597 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3598 } 3437 3599 3438 3600 #if DEBUG_MEMC_IXR_CMD 3439 3601 if(m_debug) 3440 std::cout << " <MEMC " << name() << " IXR_CMD_READ>" 3441 << " Send a get request to xram / address = " << std::hex 3442 << (addr_t)(r_read_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3443 #endif 3444 } 3445 break; 3446 } 3447 /////////////////// 3448 case IXR_CMD_WRITE: // send a put or get from WRITE FSM 3449 { 3450 if(p_vci_ixr.cmdack) 3451 { 3452 if(r_write_to_ixr_cmd_write.read()) // PUT 3453 { 3454 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3455 { 3456 r_ixr_cmd_cpt = 0; 3457 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3458 r_write_to_ixr_cmd_req = false; 3459 } 3460 else 3461 { 3462 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3463 } 3602 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3603 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3604 #endif 3605 } 3606 else // GET 3607 { 3608 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3609 r_write_to_ixr_cmd_req = false; 3464 3610 3465 3611 #if DEBUG_MEMC_IXR_CMD 3466 3612 if(m_debug) 3467 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE>" 3468 << " Send a put request to xram / address = " << std::hex 3469 << (addr_t)((r_write_to_ixr_cmd_nline.read() * m_words + 3470 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3471 #endif 3472 } 3473 else // GET 3474 { 3475 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3476 r_write_to_ixr_cmd_req = false; 3613 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3614 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3615 #endif 3616 } 3617 } 3618 break; 3619 } 3620 ////////////////////// 3621 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3622 { 3623 if(p_vci_ixr.cmdack) 3624 { 3625 if(r_cas_to_ixr_cmd_put.read()) // PUT 3626 { 
3627 if(r_ixr_cmd_word.read() == (m_words - 2)) 3628 { 3629 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3630 r_cas_to_ixr_cmd_req = false; 3631 } 3632 else 3633 { 3634 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3635 } 3477 3636 3478 3637 #if DEBUG_MEMC_IXR_CMD 3479 3638 if(m_debug) 3480 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE>" 3481 << " Send a get request to xram / address = " << std::hex 3482 << (addr_t)(r_write_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3483 #endif 3484 } 3485 } 3486 break; 3487 } 3488 ///////////////// 3489 case IXR_CMD_CAS: // send a put or get command from CAS FSM 3490 { 3491 if(p_vci_ixr.cmdack) 3492 { 3493 if(r_cas_to_ixr_cmd_write.read()) // PUT 3494 { 3495 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3496 { 3497 r_ixr_cmd_cpt = 0; 3498 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3499 r_cas_to_ixr_cmd_req = false; 3500 } 3501 else 3502 { 3503 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3504 } 3639 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3640 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3641 #endif 3642 } 3643 else // GET 3644 { 3645 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3646 r_cas_to_ixr_cmd_req = false; 3505 3647 3506 3648 #if DEBUG_MEMC_IXR_CMD 3507 3649 if(m_debug) 3508 std::cout << " <MEMC " << name() << " IXR_CMD_CAS>" 3509 << " Send a put request to xram / address = " << std::hex 3510 << (addr_t)( (r_cas_to_ixr_cmd_nline.read() * m_words + 3511 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3512 #endif 3513 } 3514 else // GET 3515 { 3516 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3517 r_cas_to_ixr_cmd_req = false; 3650 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3651 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3652 #endif 3653 } 3654 } 3655 break; 3656 } 3657 /////////////////////// 3658 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3659 { 3660 if(p_vci_ixr.cmdack) 3661 { 3662 if(r_ixr_cmd_word.read() == (m_words - 2)) 3663 { 3664 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3665 r_xram_rsp_to_ixr_cmd_req = false; 3666 } 3667 else 3668 { 3669 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3670 } 3518 3671 3519 3672 #if DEBUG_MEMC_IXR_CMD 3520 3673 if(m_debug) 3521 std::cout << " <MEMC " << name() << " IXR_CMD_CAS>" 3522 << " Send a get request to xram / address = " << std::hex 3523 << (addr_t)(r_cas_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3524 #endif 3525 } 3526 } 3527 break; 3528 } 3529 ////////////////// 3530 case IXR_CMD_XRAM: // send a put from XRAM_RSP FSM 3531 { 3532 if(p_vci_ixr.cmdack) 3533 { 3534 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3535 { 3536 r_ixr_cmd_cpt = 0; 3537 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3538 r_xram_rsp_to_ixr_cmd_req = false; 3539 } 3540 else 3541 { 3542 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3543 } 3674 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3675 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3676 #endif 3677 } 3678 break; 3679 } 3680 ///////////////////////// 3681 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 3682 { 3683 if(p_vci_ixr.cmdack) 3684 { 3685 if(r_ixr_cmd_word.read() == (m_words - 2)) 3686 { 3687 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 3688 r_config_to_ixr_cmd_req = false; 3689 } 3690 else 3691 { 3692 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3693 } 3544 3694 3545 3695 #if DEBUG_MEMC_IXR_CMD 3546 3696 if(m_debug) 3547 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM>" 3548 << " Send a 
put request to xram / address = " << std::hex 3549 << (addr_t)( (r_xram_rsp_to_ixr_cmd_nline.read() * m_words + 3550 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3551 #endif 3552 } 3553 break; 3554 } 3555 3697 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 3698 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3699 #endif 3700 } 3701 break; 3702 } 3556 3703 } // end switch r_ixr_cmd_fsm 3557 3704 … … 3560 3707 //////////////////////////////////////////////////////////////////////////// 3561 3708 // The IXR_RSP FSM receives the response packets from the XRAM, 3562 // for both put transaction, and gettransaction.3709 // for both PUT transaction, and GET transaction. 3563 3710 // 3564 // - A response to a putrequest is a single-cell VCI packet.3565 // The T ransaction Tabindex is contained in the RTRDID field.3711 // - A response to a PUT request is a single-cell VCI packet. 3712 // The TRT index is contained in the RTRDID field. 3566 3713 // The FSM takes the lock protecting the TRT, and the corresponding 3567 // entry is erased. 3714 // entry is erased. If an acknowledge was required (in case of software SYNC) 3715 // the r_config_rsp_lines counter is decremented. 3568 3716 // 3569 // - A response to a getrequest is a multi-cell VCI packet.3570 // The T ransaction Tabindex is contained in the RTRDID field.3717 // - A response to a GET request is a multi-cell VCI packet. 3718 // The TRT index is contained in the RTRDID field. 3571 3719 // The N cells contain the N words of the cache line in the RDATA field. 3572 3720 // The FSM takes the lock protecting the TRT to store the line in the TRT 3573 3721 // (taking into account the write requests already stored in the TRT). 3574 // When the line is completely written, the corresponding rok signal is set. 3722 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 3723 // signal is set to inform the XRAM_RSP FSM. 
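Since the XRAM data path is 64 bits wide while the cache line is stored as 32-bit words, a GET response is made of m_words/2 flits, each of which fills two consecutive words of the TRT entry. A sketch of that unpacking, assuming the low half of the flit carries the even-indexed word (the helper name is illustrative; in the component this work is done by m_trt.write_rsp in the IXR_RSP_TRT_READ state):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Illustrative unpacking of one 64-bit RDATA flit into the line buffer.
    // "word" plays the role of the r_ixr_rsp_cpt counter, incremented by 2
    // after each flit; the response must end (eop) when word == m_words - 2.
    void write_rsp_flit(std::vector<uint32_t> &line, std::size_t word, uint64_t rdata)
    {
        line[word]     = uint32_t(rdata & 0xFFFFFFFFu);  // low half (assumed even word)
        line[word + 1] = uint32_t(rdata >> 32);          // high half (assumed odd word)
    }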
3575 3724 /////////////////////////////////////////////////////////////////////////////// 3725 3726 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 3576 3727 3577 3728 switch(r_ixr_rsp_fsm.read()) 3578 3729 { 3579 ////////////////// 3580 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3581 { 3582 if(p_vci_ixr.rspval.read()) 3583 { 3584 r_ixr_rsp_cpt = 0; 3585 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3586 if(p_vci_ixr.reop.read() and !(p_vci_ixr.rerror.read() &0x1)) // PUT transaction 3587 { 3588 r_ixr_rsp_fsm = IXR_RSP_ACK; 3730 ////////////////// 3731 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3732 { 3733 if(p_vci_ixr.rspval.read()) 3734 { 3735 r_ixr_rsp_cpt = 0; 3736 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3737 3738 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 3739 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 3740 3741 if(p_vci_ixr.reop.read()) // PUT 3742 { 3743 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3589 3744 3590 3745 #if DEBUG_MEMC_IXR_RSP … … 3593 3748 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 3594 3749 #endif 3595 }3596 else // GET transaction3597 {3598 r_ixr_rsp_fsm = IXR_RSP_TRT_READ;3750 } 3751 else // GET 3752 { 3753 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 3599 3754 3600 3755 #if DEBUG_MEMC_IXR_RSP … … 3603 3758 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 3604 3759 #endif 3605 } 3606 } 3607 break; 3608 } 3609 ///////////////// 3610 case IXR_RSP_ACK: // Aknowledge the VCI response for a PUT 3611 { 3612 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3613 3614 #if DEBUG_MEMC_IXR_RSP 3615 if(m_debug) 3616 std::cout << " <MEMC " << name() << " IXR_RSP_ACK>" << std::endl; 3617 #endif 3618 break; 3619 } 3620 //////////////////////// 3621 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3622 { 3623 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3624 { 3625 m_trt.erase(r_ixr_rsp_trt_index.read()); 3626 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3760 } 3761 } 3762 break; 3763 } 3764 //////////////////////// 3765 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3766 // decrease the line counter if config request 3767 { 3768 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3769 { 3770 size_t index = r_ixr_rsp_trt_index.read(); 3771 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 3772 m_trt.erase(index); 3773 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3627 3774 3628 3775 #if DEBUG_MEMC_IXR_RSP … … 3631 3778 << r_ixr_rsp_trt_index.read() << std::endl; 3632 3779 #endif 3633 }3634 break;3635 }3636 //////////////////////3637 case IXR_RSP_TRT_READ: // write a 64 bits data in theTRT3638 {3639 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval)3640 {3641 size_t index = r_ixr_rsp_trt_index.read();3642 bool eop = p_vci_ixr.reop.read();3643 wide_data_t data = p_vci_ixr.rdata.read();3644 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1);3645 3646 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-2))) or p_vci_ixr.rerror.read()) 3647 and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM");3648 3649 m_trt.write_rsp( index, 3650 r_ixr_rsp_cpt.read(),3651 data,3652 error);3653 3654 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read()+ 2;3655 3656 if(eop)3657 {3658 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true;3659 r_ixr_rsp_fsm = IXR_RSP_IDLE;3660 }3780 } 3781 break; 3782 } 3783 ////////////////////// 3784 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 3785 { 3786 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and 
p_vci_ixr.rspval) 3787 { 3788 size_t index = r_ixr_rsp_trt_index.read(); 3789 size_t word = r_ixr_rsp_cpt.read(); 3790 bool eop = p_vci_ixr.reop.read(); 3791 wide_data_t data = p_vci_ixr.rdata.read(); 3792 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 3793 3794 assert(((eop == (word == (m_words-2))) or error) and 3795 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 3796 3797 m_trt.write_rsp( index, 3798 word, 3799 data ); 3800 3801 r_ixr_rsp_cpt = word + 2; 3802 3803 if( eop ) 3804 { 3805 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()] = true; 3806 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3807 } 3661 3808 3662 3809 #if DEBUG_MEMC_IXR_RSP 3663 3810 if(m_debug) 3664 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing a wordin TRT : "3811 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 3665 3812 << " index = " << std::dec << index 3666 << " / word = " << r_ixr_rsp_cpt.read()3813 << " / word = " << word 3667 3814 << " / data = " << std::hex << data << std::endl; 3668 3815 #endif 3669 }3670 break;3671 }3816 } 3817 break; 3818 } 3672 3819 } // end swich r_ixr_rsp_fsm 3673 3820 … … 3675 3822 // XRAM_RSP FSM 3676 3823 //////////////////////////////////////////////////////////////////////////// 3677 // The XRAM_RSP FSM handles the incoming cache lines from the XRAM.3824 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 3678 3825 // The cache line has been written in the TRT by the IXR_CMD_FSM. 3679 3826 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 3680 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] 3681 // as the number of entries in the TRT, that are handled with 3682 // a round-robin priority... 3827 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 3828 // of entries in the TRT, that are handled with a round-robin priority... 3683 3829 // 3684 // When a response is available, the corresponding TRT entry 3685 // is copied in a local buffer to be written in the cache. 3686 // The FSM takes the lock protecting the TRT, and the lock protecting the DIR. 3687 // It selects a cache slot and writes the line in the cache. 3830 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 3831 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 3832 // It selects a cache slot and save the victim line in another local buffer 3833 // r_xram_rsp_victim_***. 3834 // It writes the line extracted from TRT in the cache. 3688 3835 // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP 3689 3836 // FSM to return the cache line to the registered processor. 
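The round-robin selection among completed GET transactions can be summarized by the stand-alone function below, assuming rok[i] mirrors the r_ixr_rsp_to_xram_rsp_rok[i] flip-flops (one per TRT entry) and last is the previously served index r_xram_rsp_trt_index; this is the scan performed in the XRAM_RSP_IDLE state:

    #include <cstddef>
    #include <vector>

    // Illustrative model of the XRAM_RSP_IDLE scan: return the first TRT entry,
    // starting just after the last one served, whose line has been completely
    // written by the IXR_RSP FSM.
    bool select_completed_get(const std::vector<bool> &rok,
                              std::size_t last,
                              std::size_t &index)
    {
        const std::size_t lines = rok.size();        // number of TRT entries
        for (std::size_t i = 0; i < lines; i++)
        {
            std::size_t candidate = (i + last + 1) % lines;
            if (rok[candidate]) { index = candidate; return true; }
        }
        return false;                                // no completed GET: stay in IDLE
    }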
… … 3695 3842 /////////////////////////////////////////////////////////////////////////////// 3696 3843 3844 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 3845 3697 3846 switch(r_xram_rsp_fsm.read()) 3698 3847 { … … 3700 3849 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 3701 3850 { 3702 size_t ptr= r_xram_rsp_trt_index.read();3703 size_t lines = m_trt_lines;3704 for(size_t i=0 ; i<lines ; i++)3705 {3706 size_t index = (i+ptr+1) %lines;3707 if(r_ixr_rsp_to_xram_rsp_rok[index])3708 {3709 r_xram_rsp_trt_index = index;3710 r_ixr_rsp_to_xram_rsp_rok[index] = false;3711 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK;3851 size_t old = r_xram_rsp_trt_index.read(); 3852 size_t lines = m_trt_lines; 3853 for(size_t i=0 ; i<lines ; i++) 3854 { 3855 size_t index = (i+old+1) %lines; 3856 if(r_ixr_rsp_to_xram_rsp_rok[index]) 3857 { 3858 r_xram_rsp_trt_index = index; 3859 r_ixr_rsp_to_xram_rsp_rok[index] = false; 3860 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 3712 3861 3713 3862 #if DEBUG_MEMC_XRAM_RSP … … 3717 3866 << " index = " << std::dec << index << std::endl; 3718 3867 #endif 3719 break;3720 }3721 }3722 break;3868 break; 3869 } 3870 } 3871 break; 3723 3872 } 3724 3873 /////////////////////// … … 3726 3875 // Copy the TRT entry in a local buffer 3727 3876 { 3728 if((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3729 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 3730 { 3731 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 3732 size_t index = r_xram_rsp_trt_index.read(); 3733 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 3734 3735 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 3877 if( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3878 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 3879 { 3880 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 3881 size_t index = r_xram_rsp_trt_index.read(); 3882 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 3883 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 3736 3884 3737 3885 #if DEBUG_MEMC_XRAM_RSP … … 3740 3888 << " Get access to DIR and TRT" << std::endl; 3741 3889 #endif 3742 }3743 break;3890 } 3891 break; 3744 3892 } 3745 3893 /////////////////////// … … 3747 3895 // and copy it in a local buffer 3748 3896 { 3749 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3750 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) ) 3751 { 3897 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3898 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 3899 3900 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3901 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 3902 3752 3903 // selects & extracts a victim line from cache 3753 3904 size_t way = 0; … … 3758 3909 bool inval = (victim.count and victim.valid) ; 3759 3910 3760 // copy the victim line in a local buffer 3911 // copy the victim line in a local buffer (both data dir) 3761 3912 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 3762 3913 3763 3914 r_xram_rsp_victim_copy = victim.owner.srcid; 3764 3765 #if L1_MULTI_CACHE3766 r_xram_rsp_victim_copy_cache= victim.owner.cache_id;3767 #endif3768 3915 r_xram_rsp_victim_copy_inst = victim.owner.inst; 3769 3916 r_xram_rsp_victim_count = victim.count; … … 3776 3923 r_xram_rsp_victim_dirty = victim.dirty; 3777 3924 3778 if(!r_xram_rsp_trt_buf.rerror) 3779 { 3780 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 3781 } 3782 else 3783 { 3784 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 3785 } 3925 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 3926 else r_xram_rsp_fsm = 
XRAM_RSP_ERROR_ERASE; 3786 3927 3787 3928 #if DEBUG_MEMC_XRAM_RSP 3788 3929 if(m_debug) 3789 3930 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 3790 << " Select a slot: "3931 << " Select a victim slot: " 3791 3932 << " way = " << std::dec << way 3792 3933 << " / set = " << set 3793 3934 << " / inval_required = " << inval << std::endl; 3794 3935 #endif 3795 }3796 else3797 {3798 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_TRT_COPY"3799 << " bad TRT or DIR allocation" << std::endl;3800 exit(0);3801 }3802 break;3803 } 3804 /////////////////////////3805 case XRAM_RSP_INVAL_LOCK: // Take the IVT lock to check a possible pending inval3806 { 3807 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP)3808 {3809 size_t index = 0;3810 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval3811 {3812 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;3936 break; 3937 } 3938 /////////////////////// 3939 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 3940 // to check a possible pending inval 3941 { 3942 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3943 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 3944 3945 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3946 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 3947 3948 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 3949 { 3950 size_t index = 0; 3951 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 3952 { 3953 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 3813 3954 3814 3955 #if DEBUG_MEMC_XRAM_RSP 3815 3956 if(m_debug) 3816 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"3957 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3817 3958 << " Get acces to IVT, but line invalidation registered" 3818 << " / nline = " << std::hex << r_xram_rsp_trt_buf.nline3959 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 3819 3960 << " / index = " << std::dec << index << std::endl; 3820 3961 #endif 3821 3962 3822 }3823 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full3824 {3825 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;3963 } 3964 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 3965 { 3966 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 3826 3967 3827 3968 #if DEBUG_MEMC_XRAM_RSP 3828 3969 if(m_debug) 3829 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"3970 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3830 3971 << " Get acces to IVT, but inval required and IVT full" << std::endl; 3831 3972 #endif 3832 }3833 else3834 {3835 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT;3973 } 3974 else 3975 { 3976 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 3836 3977 3837 3978 #if DEBUG_MEMC_XRAM_RSP 3838 3979 if(m_debug) 3839 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"3840 << " Get acces to IVT " << std::endl;3841 #endif 3842 }3843 }3844 break;3980 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3981 << " Get acces to IVT / no pending inval request" << std::endl; 3982 #endif 3983 } 3984 } 3985 break; 3845 3986 } 3846 3987 ///////////////////////// … … 3853 3994 << " Release all locks and retry" << std::endl; 3854 3995 #endif 3855 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK;3856 break;3996 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 3997 break; 3857 3998 } 3858 3999 /////////////////////// 3859 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 3860 // and possibly set an inval request in IVT 3861 { 3862 // check if this is an instruction read, this means pktid is either 3863 // 
TYPE_READ_INS_UNC 0bX010 with TSAR encoding 3864 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 3865 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 3866 3867 // check if this is a cached read, this means pktid is either 3868 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 3869 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 3870 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 3871 3872 bool dirty = false; 3873 3874 // update cache data 3875 size_t set = r_xram_rsp_victim_set.read(); 3876 size_t way = r_xram_rsp_victim_way.read(); 3877 for(size_t word=0; word<m_words ; word++) 3878 { 3879 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 3880 3881 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 3882 3883 if(m_monitor_ok) 3884 { 3885 addr_t address = r_xram_rsp_trt_buf.nline<<6 | word<<2; 3886 check_monitor( address, r_xram_rsp_trt_buf.wdata[word], false); 3887 } 3888 } 3889 3890 // update cache directory 3891 DirectoryEntry entry; 3892 entry.valid = true; 3893 entry.is_cnt = false; 3894 entry.lock = false; 3895 entry.dirty = dirty; 3896 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 3897 entry.ptr = 0; 3898 if(cached_read) 3899 { 3900 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 3901 #if L1_MULTI_CACHE 3902 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 3903 #endif 3904 entry.owner.inst = inst_read; 3905 entry.count = 1; 3906 } 3907 else 3908 { 3909 entry.owner.srcid = 0; 3910 #if L1_MULTI_CACHE 3911 entry.owner.cache_id = 0; 3912 #endif 3913 entry.owner.inst = 0; 3914 entry.count = 0; 3915 } 3916 m_cache_directory.write(set, way, entry); 3917 3918 // request an invalidattion request in IVT for victim line 3919 if(r_xram_rsp_victim_inval.read()) 3920 { 3921 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 3922 size_t index = 0; 3923 size_t count_copies = r_xram_rsp_victim_count.read(); 3924 3925 bool wok = m_ivt.set(false, // it's an inval transaction 3926 broadcast, // set broadcast bit 3927 false, // no response required 3928 false, // no acknowledge required 3929 0, // srcid 3930 0, // trdid 3931 0, // pktid 3932 r_xram_rsp_victim_nline.read(), 3933 count_copies, 3934 index); 3935 3936 r_xram_rsp_ivt_index = index; 3937 3938 if(!wok) 3939 { 3940 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_DIR_UPDT" 3941 << " invalidate_tab entry free but write unsuccessful" << std::endl; 3942 exit(0); 3943 } 3944 } 4000 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4001 // erases the TRT entry if victim not dirty, 4002 // and set inval request in IVT if required 4003 { 4004 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4005 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4006 4007 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4008 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4009 4010 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4011 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4012 4013 // check if this is an instruction read, this means pktid is either 4014 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4015 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4016 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4017 4018 // check if this is a cached read, this means pktid is either 4019 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4020 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4021 bool cached_read = (r_xram_rsp_trt_buf.pktid & 
0x1) and r_xram_rsp_trt_buf.proc_read; 4022 4023 bool dirty = false; 4024 4025 // update cache data 4026 size_t set = r_xram_rsp_victim_set.read(); 4027 size_t way = r_xram_rsp_victim_way.read(); 4028 4029 for(size_t word=0; word<m_words ; word++) 4030 { 4031 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4032 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4033 } 4034 4035 // update cache directory 4036 DirectoryEntry entry; 4037 entry.valid = true; 4038 entry.is_cnt = false; 4039 entry.lock = false; 4040 entry.dirty = dirty; 4041 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4042 entry.ptr = 0; 4043 if(cached_read) 4044 { 4045 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4046 entry.owner.inst = inst_read; 4047 entry.count = 1; 4048 } 4049 else 4050 { 4051 entry.owner.srcid = 0; 4052 entry.owner.inst = 0; 4053 entry.count = 0; 4054 } 4055 m_cache_directory.write(set, way, entry); 4056 4057 // register invalid request in IVT for victim line if required 4058 if(r_xram_rsp_victim_inval.read()) 4059 { 4060 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4061 size_t index = 0; 4062 size_t count_copies = r_xram_rsp_victim_count.read(); 4063 4064 bool wok = m_ivt.set(false, // it's an inval transaction 4065 broadcast, // set broadcast bit 4066 false, // no response required 4067 false, // no acknowledge required 4068 0, // srcid 4069 0, // trdid 4070 0, // pktid 4071 r_xram_rsp_victim_nline.read(), 4072 count_copies, 4073 index); 4074 4075 r_xram_rsp_ivt_index = index; 4076 4077 assert( wok and 4078 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4079 } 3945 4080 3946 4081 #if DEBUG_MEMC_XRAM_RSP … … 3956 4091 << " / is_cnt = " << entry.is_cnt << std::endl; 3957 4092 if(r_xram_rsp_victim_inval.read()) 3958 std::cout << " Invalidation request for victim line"3959 << std::hex << r_xram_rsp_victim_nline.read() 4093 std::cout << " Invalidation request for address " 4094 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 3960 4095 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 3961 4096 } 3962 4097 #endif 3963 4098 3964 // If the victim is not dirty, we don't need another XRAM put transaction, 3965 // and we can erase the TRT entry 3966 if(!r_xram_rsp_victim_dirty.read()) m_trt.erase(r_xram_rsp_trt_index.read()); 3967 3968 // Next state 3969 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 3970 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 3971 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 3972 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3973 break; 4099 // If the victim is not dirty, we don't need to reuse the TRT entry for 4100 // another PUT transaction, and we can erase the TRT entry 4101 if( not r_xram_rsp_victim_dirty.read() ) 4102 { 4103 m_trt.erase(r_xram_rsp_trt_index.read()); 4104 } 4105 4106 // Next state 4107 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4108 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4109 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4110 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4111 break; 3974 4112 } 3975 4113 //////////////////////// 3976 4114 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 3977 4115 { 3978 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 3979 { 3980 m_trt.set(r_xram_rsp_trt_index.read(), 3981 false, // write to XRAM 3982 r_xram_rsp_victim_nline.read(), // line index 3983 0, 3984 0, 3985 0, 
3986 false, 3987 0, 3988 0, 3989 std::vector<be_t> (m_words,0), 3990 std::vector<data_t> (m_words,0)); 4116 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4117 { 4118 std::vector<data_t> data_vector; 4119 data_vector.clear(); 4120 for(size_t i=0; i<m_words; i++) 4121 { 4122 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4123 } 4124 m_trt.set( r_xram_rsp_trt_index.read(), 4125 false, // PUT 4126 r_xram_rsp_victim_nline.read(), // line index 4127 0, // unused 4128 0, // unused 4129 0, // unused 4130 false, // not proc_read 4131 0, // unused 4132 0, // unused 4133 std::vector<be_t>(m_words,0xF), 4134 data_vector); 3991 4135 3992 4136 #if DEBUG_MEMC_XRAM_RSP … … 3994 4138 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 3995 4139 << " Set TRT entry for the put transaction" 3996 << " / dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl;3997 #endif 3998 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP;3999 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL;4000 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;4001 }4002 break;4140 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4141 #endif 4142 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4143 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4144 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4145 } 4146 break; 4003 4147 } 4004 4148 ////////////////////// 4005 4149 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4006 4150 { 4007 if(!r_xram_rsp_to_tgt_rsp_req.read())4008 {4009 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid;4010 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid;4011 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid;4012 for(size_t i=0; i < m_words; i++)4013 {4014 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i];4015 }4016 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index;4017 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length;4018 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key;4019 r_xram_rsp_to_tgt_rsp_rerror = false;4020 r_xram_rsp_to_tgt_rsp_req = true;4021 4022 if(r_xram_rsp_victim_inval) r_xram_rsp_fsm = XRAM_RSP_INVAL;4023 else if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;4024 elser_xram_rsp_fsm = XRAM_RSP_IDLE;4151 if ( not r_xram_rsp_to_tgt_rsp_req.read() ) 4152 { 4153 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4154 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4155 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4156 for(size_t i=0; i < m_words; i++) 4157 { 4158 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4159 } 4160 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4161 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4162 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4163 r_xram_rsp_to_tgt_rsp_rerror = false; 4164 r_xram_rsp_to_tgt_rsp_req = true; 4165 4166 if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4167 else if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4168 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4025 4169 4026 4170 #if DEBUG_MEMC_XRAM_RSP … … 4032 4176 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4033 4177 #endif 4034 }4035 break;4178 } 4179 break; 4036 4180 } 4037 4181 //////////////////// … … 4051 4195 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4052 4196 xram_rsp_to_cc_send_fifo_inst = 
r_xram_rsp_victim_copy_inst.read(); 4053 #if L1_MULTI_CACHE4054 xram_rsp_to_cc_send_fifo_cache_id = r_xram_rsp_victim_copy_cache.read();4055 #endif4056 4197 xram_rsp_to_cc_send_fifo_put = multi_req; 4057 r_xram_rsp_next_ptr 4198 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4058 4199 4059 4200 if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; … … 4065 4206 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4066 4207 << " Send an inval request to CC_SEND FSM" 4067 << " / victim line = " << r_xram_rsp_victim_nline.read()<< std::endl;4208 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4068 4209 #endif 4069 4210 } … … 4073 4214 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4074 4215 { 4075 if(!r_xram_rsp_to_ixr_cmd_req.read()) 4076 { 4077 r_xram_rsp_to_ixr_cmd_req = true; 4078 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 4079 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 4080 for(size_t i=0; i<m_words ; i++) 4081 { 4082 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 4083 } 4084 m_cpt_write_dirty++; 4085 4086 bool multi_req = !r_xram_rsp_victim_is_cnt.read() and r_xram_rsp_victim_inval.read(); 4087 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4088 4089 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4090 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4216 if ( not r_xram_rsp_to_ixr_cmd_req.read() ) 4217 { 4218 r_xram_rsp_to_ixr_cmd_req = true; 4219 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4220 4221 m_cpt_write_dirty++; 4222 4223 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4224 r_xram_rsp_victim_inval.read(); 4225 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4226 4227 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4228 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4091 4229 4092 4230 #if DEBUG_MEMC_XRAM_RSP … … 4094 4232 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4095 4233 << " Send the put request to IXR_CMD FSM" 4096 << " / victim line = " << r_xram_rsp_victim_nline.read()<< std::endl;4097 #endif 4098 }4099 break;4234 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4235 #endif 4236 } 4237 break; 4100 4238 } 4101 4239 ///////////////////////// 4102 4240 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4103 4241 { 4104 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP)4105 {4106 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE;4107 }4242 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4243 { 4244 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4245 } 4108 4246 4109 4247 #if DEBUG_MEMC_XRAM_RSP … … 4112 4250 << " Requesting HEAP lock" << std::endl; 4113 4251 #endif 4114 break;4252 break; 4115 4253 } 4116 4254 ///////////////////////// … … 4122 4260 4123 4261 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4124 #if L1_MULTI_CACHE4125 xram_rsp_to_cc_send_fifo_cache_id = entry.owner.cache_id;4126 #endif4127 4262 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4128 4263 xram_rsp_to_cc_send_fifo_put = true; … … 4168 4303 HeapEntry last_entry; 4169 4304 last_entry.owner.srcid = 0; 4170 #if L1_MULTI_CACHE4171 last_entry.owner.cache_id = 0;4172 #endif4173 4305 last_entry.owner.inst = false; 4174 4306 if(m_heap.is_full()) … … 4194 4326 break; 4195 4327 } 4196 // 4328 ////////////////////////// 4197 4329 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 4198 4330 { … … 4247 4379 
//////////////////////////////////////////////////////////////////////////////////// 4248 4380 4381 //std::cout << std::endl << "cleanup_fsm" << std::endl; 4382 4249 4383 switch(r_cleanup_fsm.read()) 4250 4384 { 4251 ////////////////// 4252 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4253 { 4254 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4255 4256 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4257 4258 uint32_t srcid = 4259 DspinDhccpParam::dspin_get( 4260 flit, 4261 DspinDhccpParam::CLEANUP_SRCID); 4262 4263 uint8_t type = 4264 DspinDhccpParam::dspin_get( 4265 flit, 4266 DspinDhccpParam::P2M_TYPE); 4267 4268 r_cleanup_way_index = 4269 DspinDhccpParam::dspin_get( 4270 flit, 4271 DspinDhccpParam::CLEANUP_WAY_INDEX); 4272 4273 r_cleanup_nline = 4274 DspinDhccpParam::dspin_get( 4275 flit, 4276 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4277 4278 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4279 r_cleanup_srcid = srcid; 4280 4281 if(srcid >= m_initiators) 4282 { 4283 std::cout 4284 << "VCI_MEM_CACHE ERROR " << name() 4285 << " CLEANUP_IDLE state" << std::endl 4286 << "illegal srcid for cleanup request" << std::endl; 4287 4288 exit(0); 4289 } 4290 4291 m_cpt_cleanup++; 4292 cc_receive_to_cleanup_fifo_get = true; 4293 r_cleanup_fsm = CLEANUP_GET_NLINE; 4385 ////////////////// 4386 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4387 { 4388 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4389 4390 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4391 4392 uint32_t srcid = DspinDhccpParam::dspin_get( flit, 4393 DspinDhccpParam::CLEANUP_SRCID); 4394 4395 uint8_t type = DspinDhccpParam::dspin_get( flit, 4396 DspinDhccpParam::P2M_TYPE); 4397 4398 r_cleanup_way_index = DspinDhccpParam::dspin_get( flit, 4399 DspinDhccpParam::CLEANUP_WAY_INDEX); 4400 4401 r_cleanup_nline = DspinDhccpParam::dspin_get( flit, 4402 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4403 4404 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4405 r_cleanup_srcid = srcid; 4406 4407 assert( (srcid < m_initiators) and 4408 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 4409 4410 m_cpt_cleanup++; 4411 cc_receive_to_cleanup_fifo_get = true; 4412 r_cleanup_fsm = CLEANUP_GET_NLINE; 4294 4413 4295 4414 #if DEBUG_MEMC_CLEANUP … … 4297 4416 std::cout << " <MEMC " << name() 4298 4417 << " CLEANUP_IDLE> Cleanup request:" << std::hex 4299 << " /owner_id = " << srcid4418 << " owner_id = " << srcid 4300 4419 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) << std::endl; 4301 4420 #endif 4302 break; 4303 } 4304 4305 /////////////////////// 4306 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4307 { 4308 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4309 4310 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4311 4312 addr_t nline = r_cleanup_nline.read() | 4313 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4314 4315 cc_receive_to_cleanup_fifo_get = true; 4316 r_cleanup_nline = nline; 4317 r_cleanup_fsm = CLEANUP_DIR_REQ; 4421 break; 4422 } 4423 /////////////////////// 4424 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4425 { 4426 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4427 4428 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4429 4430 addr_t nline = r_cleanup_nline.read() | 4431 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4432 4433 cc_receive_to_cleanup_fifo_get = true; 4434 r_cleanup_nline = nline; 4435 
r_cleanup_fsm = CLEANUP_DIR_REQ; 4318 4436 4319 4437 #if DEBUG_MEMC_CLEANUP … … 4321 4439 std::cout << " <MEMC " << name() 4322 4440 << " CLEANUP_GET_NLINE> Cleanup request:" 4323 << " / address = " << std::hex << nline * m_words * 4 << std::endl; 4324 #endif 4325 break; 4326 } 4327 4328 ///////////////////// 4329 case CLEANUP_DIR_REQ: // Get the lock to the directory 4330 { 4331 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4332 4333 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4441 << " address = " << std::hex << nline * m_words * 4 << std::endl; 4442 #endif 4443 break; 4444 } 4445 ///////////////////// 4446 case CLEANUP_DIR_REQ: // Get the lock to the directory 4447 { 4448 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4449 4450 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4334 4451 4335 4452 #if DEBUG_MEMC_CLEANUP … … 4337 4454 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 4338 4455 #endif 4339 break;
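The line index targeted by a cleanup is carried by the two DSPIN flits decoded in the CLEANUP_IDLE and CLEANUP_GET_NLINE states above; its reconstruction boils down to the helper sketched below (the function name is illustrative, the field extraction itself relies on DspinDhccpParam::dspin_get):

    #include <cstdint>

    // Illustrative reassembly of the cleanup line index: the first flit gives
    // CLEANUP_NLINE_MSB (stored pre-shifted by 32 in r_cleanup_nline), the
    // second flit gives CLEANUP_NLINE_LSB.
    uint64_t cleanup_nline(uint64_t nline_msb, uint64_t nline_lsb)
    {
        return (nline_msb << 32) | nline_lsb;   // byte address = nline * m_words * 4
    }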