- Timestamp: Aug 9, 2013, 11:00:05 AM
- Location: trunk/modules/vci_mem_cache
- Files: 5 edited
trunk/modules/vci_mem_cache/caba/source/include/mem_cache_directory.h
r449 → r489

- The obsolete L1_MULTI_CACHE mechanism is removed: the warning comment ("no longer works with the new pktid encoding of TSAR") and the #define L1_MULTI_CACHE 0 disappear, together with every #if L1_MULTI_CACHE ... #endif block: the cache_id field of the Owner structure, its initialisation in the three Owner constructors and in the directory entry init(), and its display in the DirectoryEntry and HeapEntry print() functions. The Owner constructor signature becomes Owner(bool i_inst, size_t i_srcid), and the HeapEntry default constructor now builds its owner with owner(false,0).

- DirectoryEntry::write(), the Owner and HeapEntry constructors, HeapEntry::copy() and the whole CacheData class are re-indented (brace placement and comment separators only, no functional change).

- The directory select() function keeps the same victim selection policy but is rewritten with explicit comments, in this priority order:

      // looking for an empty slot
      // (RANDOM_EVICTION variant: pseudo-random way from the lfsr)
      // looking for a not locked and not recently used entry
      // looking for a locked not recently used entry
      // looking for a recently used entry not locked
      // select way 0 (even if entry is locked and recently used)

- In CacheData::write() the last assertion message becomes "Cache data error: Trying to write a wrong be" (it previously said "a wrong word cell"); the byte-enable merge itself is unchanged:

      uint32_t mask = 0;
      if (be & 0x1) mask = mask | 0x000000FF;
      if (be & 0x2) mask = mask | 0x0000FF00;
      if (be & 0x4) mask = mask | 0x00FF0000;
      if (be & 0x8) mask = mask | 0xFF000000;

      m_cache_data[way][set][word] =
          (data & mask) | (m_cache_data[way][set][word] & ~mask);
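Since the byte-enable merge above is what makes partial-word writes work, here is a minimal stand-alone sketch of the same masking logic; the function name be_merge and the test values are illustrative only, not part of the changeset:

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Merge 'data' into 'old_word' under a 4-bit byte enable, mirroring the
    // mask construction used by CacheData::write().
    uint32_t be_merge(uint32_t old_word, uint32_t data, uint32_t be)
    {
        assert((be <= 0xF) && "the byte enable is 4 bits");

        if (be == 0x0) return old_word;   // nothing written
        if (be == 0xF) return data;       // full word written

        uint32_t mask = 0;
        if (be & 0x1) mask |= 0x000000FF;
        if (be & 0x2) mask |= 0x0000FF00;
        if (be & 0x4) mask |= 0x00FF0000;
        if (be & 0x8) mask |= 0xFF000000;

        return (data & mask) | (old_word & ~mask);
    }

    int main()
    {
        // write only the two low bytes: the two high bytes of the old word are kept
        std::cout << std::hex << be_merge(0xAABBCCDD, 0x11223344, 0x3)
                  << std::endl;           // prints aabb3344
        return 0;
    }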
trunk/modules/vci_mem_cache/caba/source/include/vci_mem_cache.h
r483 → r489

- The first maintainer line "alain eric.guthmuller@polytechnique.edu" is replaced by alain.greiner@lip6.fr and eric.guthmuller@polytechnique.edu (cesar.fuguet-tortolero@lip6.fr and alexandre.joannou@lip6.fr are unchanged).

- FSM state enums:
  - MULTI_ACK: MULTI_ACK_CONFIG_ACK is removed.
  - CONFIG: CONFIG_WAIT, CONFIG_TRT_LOCK, CONFIG_TRT_SET and CONFIG_PUT_REQ are added; CONFIG_DIR_IVT_LOCK becomes CONFIG_IVT_LOCK and CONFIG_INV_SEND becomes CONFIG_INVAL_SEND; CONFIG_BC_WAIT and CONFIG_INV_WAIT are removed.
  - WRITE: WRITE_DIR_READ is removed, WRITE_BC_DIR_READ is added.
  - IXR_RSP: IXR_RSP_ACK is removed.
  - XRAM_RSP: XRAM_RSP_INVAL_LOCK becomes XRAM_RSP_IVT_LOCK.
  - IXR_CMD: the single IXR_CMD_READ / IXR_CMD_WRITE / IXR_CMD_CAS / IXR_CMD_XRAM states are replaced by *_TRT / *_SEND pairs, and a CONFIG client is added (IXR_CMD_CONFIG_IDLE, IXR_CMD_CONFIG_TRT, IXR_CMD_CONFIG_SEND).
  - CLEANUP: CLEANUP_CONFIG_ACK is removed.
  - ALLOC_TRT: ALLOC_TRT_CONFIG and ALLOC_TRT_IXR_CMD are added.

- The cmd_config_type_e enum (CMD_CONFIG_INVAL / CMD_CONFIG_SYNC) is removed.

- Debug members: m_debug_previous_data and m_debug_data become plain data_t arrays (they were sc_signal<data_t> arrays); the m_monitor_ok, m_monitor_base and m_monitor_length members are removed.

- The r_tgt_cmd_fsm declaration is moved next to three new registers r_tgt_cmd_srcid / r_tgt_cmd_trdid / r_tgt_cmd_pktid, which store the identity of the configuration requester.

- CONFIG FSM registers: r_config_nlines is replaced by r_config_cmd_lines (lines still to be handled) and r_config_rsp_lines (lines not yet completed); r_config_dir_next_ptr is renamed r_config_dir_ptr; r_config_dir_lock and r_config_trt_index are added; a new buffer to the IXR_CMD FSM is added (r_config_to_ixr_cmd_req, r_config_to_ixr_cmd_index).

- The buffers from the READ, WRITE, CAS and XRAM_RSP FSMs to the IXR_CMD FSM no longer carry the cache line index, the line data and a trdid: they are reduced to a valid bit, a TRT index (r_*_to_ixr_cmd_index) and, for WRITE and CAS, a GET/PUT flag (r_write_to_ixr_cmd_put, r_cas_to_ixr_cmd_put).

- r_multi_ack_to_config_ack and r_cleanup_to_config_ack are removed; r_ixr_rsp_to_config_ack is added (the IXR_RSP FSM now signals PUT completions to the CONFIG FSM), and the comment of r_ixr_rsp_to_xram_rsp_rok becomes "one bit per TRT entry".

- All the L1_MULTI_CACHE conditional members are removed (the m_*_to_cc_send_cache_id_fifo fifos and r_cleanup_pktid).

- IXR_CMD FSM registers: r_ixr_cmd_cpt is replaced by r_ixr_cmd_word (word counter for a PUT), r_ixr_cmd_trdid, r_ixr_cmd_address, the r_ixr_cmd_wdata cache-line buffer, and r_ixr_cmd_get (GET/PUT transaction type).

- The remaining differences are alignment and comment cleanups in the register declarations.
trunk/modules/vci_mem_cache/caba/source/include/xram_transaction.h
r422 → r489

- TransactionTabEntry gains a bool config flag ("transaction required by CONFIG FSM"). It is cleared by init(), copied by copy() and the copy constructor, displayed by print(), and set through a new optional argument of TransactionTab::set() (const bool config = false).

- A new access function bool is_config(const size_t index) returns the config flag of a TRT entry.

- TransactionTabEntry::print() now prints a "------- TRT entry -------" header, prints the wdata and wdata_be arrays in hexadecimal with decimal indices, and also prints the rerror, ll_key and config fields.

- write_rsp() loses its bool rerror argument and no longer updates the entry's rerror flag; its explicit if / exit(0) error checks are replaced by assertions ("MEMC ERROR: ..."). As before, it splits the 64-bit XRAM response into two 32-bit words and merges each of them under the byte-enable mask stored in the entry, so that bytes already written by the pending write transaction are preserved:

      // first 32 bits word
      value = (data_t)data;
      mask  = be_to_mask(tab[index].wdata_be[word]);
      tab[index].wdata[word]   = (tab[index].wdata[word] & mask)   | (value & ~mask);

      // second 32 bits word
      value = (data_t)(data >> 32);
      mask  = be_to_mask(tab[index].wdata_be[word+1]);
      tab[index].wdata[word+1] = (tab[index].wdata[word+1] & mask) | (value & ~mask);

- The consistency checks in print(), read(), write_data_mask(), set() and erase() are also rewritten as assertions with "MEMC ERROR: ..." messages.

- full(), hit_read() and hit_write() are re-indented (no functional change).
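A small self-contained sketch of that merge for one response flit; be_to_mask() is re-implemented locally for the example (its real definition lives elsewhere in the memory cache sources), and the function names and test values are illustrative:

    #include <cstdint>
    #include <cstddef>
    #include <iostream>

    typedef uint32_t data_t;
    typedef uint64_t wide_data_t;
    typedef uint32_t be_t;

    // Local re-implementation of the byte-enable to mask expansion.
    static data_t be_to_mask(be_t be)
    {
        data_t mask = 0;
        if (be & 0x1) mask |= 0x000000FF;
        if (be & 0x2) mask |= 0x0000FF00;
        if (be & 0x4) mask |= 0x00FF0000;
        if (be & 0x8) mask |= 0xFF000000;
        return mask;
    }

    // Merge one 64-bit XRAM response flit into two consecutive 32-bit TRT words:
    // byte lanes already holding write data (byte-enable bit set) are preserved.
    static void merge_rsp(data_t wdata[2], const be_t wdata_be[2], wide_data_t flit)
    {
        for (size_t i = 0; i < 2; i++)
        {
            data_t value = (data_t)(flit >> (32 * i));
            data_t mask  = be_to_mask(wdata_be[i]);
            wdata[i] = (wdata[i] & mask) | (value & ~mask);
        }
    }

    int main()
    {
        data_t wdata[2]    = {0x000000AA, 0x00000000};  // pending write: one byte in word 0
        be_t   wdata_be[2] = {0x1, 0x0};

        merge_rsp(wdata, wdata_be, 0x1122334455667788ULL);

        std::cout << std::hex << wdata[0] << " " << wdata[1]
                  << std::endl;                         // prints 556677aa 11223344
        return 0;
    }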
trunk/modules/vci_mem_cache/caba/source/src/vci_mem_cache.cpp
r483 r489 45 45 #define DEBUG_MEMC_WRITE 1 // detailed trace of WRITE FSM 46 46 #define DEBUG_MEMC_CAS 1 // detailed trace of CAS FSM 47 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_ RSPFSM47 #define DEBUG_MEMC_IXR_CMD 1 // detailed trace of IXR_CMD FSM 48 48 #define DEBUG_MEMC_IXR_RSP 1 // detailed trace of IXR_RSP FSM 49 49 #define DEBUG_MEMC_XRAM_RSP 1 // detailed trace of XRAM_RSP FSM … … 124 124 "MULTI_ACK_UPT_LOCK", 125 125 "MULTI_ACK_UPT_CLEAR", 126 "MULTI_ACK_WRITE_RSP", 127 "MULTI_ACK_CONFIG_ACK" 126 "MULTI_ACK_WRITE_RSP" 128 127 }; 129 128 const char *config_fsm_str[] = … … 131 130 "CONFIG_IDLE", 132 131 "CONFIG_LOOP", 132 "CONFIG_WAIT", 133 133 "CONFIG_RSP", 134 134 "CONFIG_DIR_REQ", 135 135 "CONFIG_DIR_ACCESS", 136 "CONFIG_ DIR_IVT_LOCK",136 "CONFIG_IVT_LOCK", 137 137 "CONFIG_BC_SEND", 138 "CONFIG_BC_WAIT", 139 "CONFIG_INV_SEND", 138 "CONFIG_INVAL_SEND", 140 139 "CONFIG_HEAP_REQ", 141 140 "CONFIG_HEAP_SCAN", 142 141 "CONFIG_HEAP_LAST", 143 "CONFIG_INV_WAIT" 142 "CONFIG_TRT_LOCK", 143 "CONFIG_TRT_SET", 144 "CONFIG_PUT_REQ" 144 145 }; 145 146 const char *read_fsm_str[] = … … 165 166 "WRITE_DIR_REQ", 166 167 "WRITE_DIR_LOCK", 167 "WRITE_DIR_READ",168 168 "WRITE_DIR_HIT", 169 169 "WRITE_UPT_LOCK", … … 177 177 "WRITE_MISS_TRT_SET", 178 178 "WRITE_MISS_XRAM_REQ", 179 "WRITE_BC_DIR_READ", 179 180 "WRITE_BC_TRT_LOCK", 180 181 "WRITE_BC_IVT_LOCK", … … 187 188 { 188 189 "IXR_RSP_IDLE", 189 "IXR_RSP_ACK",190 190 "IXR_RSP_TRT_ERASE", 191 191 "IXR_RSP_TRT_READ" … … 199 199 "XRAM_RSP_DIR_UPDT", 200 200 "XRAM_RSP_DIR_RSP", 201 "XRAM_RSP_I NVAL_LOCK",201 "XRAM_RSP_IVT_LOCK", 202 202 "XRAM_RSP_INVAL_WAIT", 203 203 "XRAM_RSP_INVAL", … … 215 215 "IXR_CMD_CAS_IDLE", 216 216 "IXR_CMD_XRAM_IDLE", 217 "IXR_CMD_READ", 218 "IXR_CMD_WRITE", 219 "IXR_CMD_CAS", 220 "IXR_CMD_XRAM" 217 "IXR_CMD_CONFIG_IDLE", 218 "IXR_CMD_READ_TRT", 219 "IXR_CMD_WRITE_TRT", 220 "IXR_CMD_CAS_TRT", 221 "IXR_CMD_XRAM_TRT", 222 "IXR_CMD_CONFIG_TRT", 223 "IXR_CMD_READ_SEND", 224 "IXR_CMD_WRITE_SEND", 225 "IXR_CMD_CAS_SEND", 226 "IXR_CMD_XRAM_SEND", 227 "IXR_CMD_CONFIG_SEND" 221 228 }; 222 229 const char *cas_fsm_str[] = … … 260 267 "CLEANUP_IVT_CLEAR", 261 268 "CLEANUP_WRITE_RSP", 262 "CLEANUP_CONFIG_ACK",263 269 "CLEANUP_SEND_CLACK" 264 270 }; … … 279 285 "ALLOC_TRT_CAS", 280 286 "ALLOC_TRT_XRAM_RSP", 281 "ALLOC_TRT_IXR_RSP" 287 "ALLOC_TRT_IXR_RSP", 288 "ALLOC_TRT_CONFIG", 289 "ALLOC_TRT_IXR_CMD" 282 290 }; 283 291 const char *alloc_upt_fsm_str[] = … … 380 388 m_broadcast_boundaries(0x7C1F), 381 389 382 r_tgt_cmd_fsm("r_tgt_cmd_fsm"),383 390 384 391 // FIFOs … … 407 414 m_cc_receive_to_multi_ack_fifo("m_cc_receive_to_multi_ack_fifo", 4), 408 415 416 r_tgt_cmd_fsm("r_tgt_cmd_fsm"), 417 409 418 r_config_fsm( "r_config_fsm" ), 410 419 … … 418 427 m_write_to_cc_send_inst_fifo("m_write_to_cc_send_inst_fifo",8), 419 428 m_write_to_cc_send_srcid_fifo("m_write_to_cc_send_srcid_fifo",8), 420 #if L1_MULTI_CACHE421 m_write_to_cc_send_cache_id_fifo("m_write_to_cc_send_cache_id_fifo",8),422 #endif423 429 424 430 r_multi_ack_fsm("r_multi_ack_fsm"), … … 430 436 m_cas_to_cc_send_inst_fifo("m_cas_to_cc_send_inst_fifo",8), 431 437 m_cas_to_cc_send_srcid_fifo("m_cas_to_cc_send_srcid_fifo",8), 432 #if L1_MULTI_CACHE433 m_cas_to_cc_send_cache_id_fifo("m_cas_to_cc_send_cache_id_fifo",8),434 #endif435 438 436 439 r_ixr_rsp_fsm("r_ixr_rsp_fsm"), … … 439 442 m_xram_rsp_to_cc_send_inst_fifo("m_xram_rsp_to_cc_send_inst_fifo",8), 440 443 m_xram_rsp_to_cc_send_srcid_fifo("m_xram_rsp_to_cc_send_srcid_fifo",8), 441 #if L1_MULTI_CACHE442 
m_xram_rsp_to_cc_send_cache_id_fifo("m_xram_rsp_to_cc_send_cache_id_fifo",8),443 #endif444 444 445 445 r_ixr_cmd_fsm("r_ixr_cmd_fsm"), … … 509 509 r_xram_rsp_victim_data = new sc_signal<data_t>[nwords]; 510 510 r_xram_rsp_to_tgt_rsp_data = new sc_signal<data_t>[nwords]; 511 r_xram_rsp_to_ixr_cmd_data = new sc_signal<data_t>[nwords];512 511 513 512 // Allocation for READ FSM … … 520 519 r_write_to_cc_send_data = new sc_signal<data_t>[nwords]; 521 520 r_write_to_cc_send_be = new sc_signal<be_t>[nwords]; 522 r_write_to_ixr_cmd_data = new sc_signal<data_t>[nwords];523 521 524 522 // Allocation for CAS FSM 525 r_cas_to_ixr_cmd_data = new sc_signal<data_t>[nwords];526 523 r_cas_data = new sc_signal<data_t>[nwords]; 527 524 r_cas_rdata = new sc_signal<data_t>[2]; 528 525 526 // Allocation for IXR_CMD FSM 527 r_ixr_cmd_wdata = new sc_signal<data_t>[nwords]; 528 529 529 // Allocation for debug 530 m_debug_previous_data = new sc_signal<data_t>[nwords];531 m_debug_data = new sc_signal<data_t>[nwords];530 m_debug_previous_data = new data_t[nwords]; 531 m_debug_data = new data_t[nwords]; 532 532 533 533 SC_METHOD(transition); … … 540 540 } // end constructor 541 541 542 ///////////////////////////////////////////////////////////////////////543 tmpl(void) ::start_monitor(addr_t addr, addr_t length)544 ///////////////////////////////////////////////////////////////////////545 {546 m_monitor_ok = true;547 m_monitor_base = addr;548 m_monitor_length = length;549 }550 551 ///////////////////////////////////////////////////////////////////////552 tmpl(void) ::stop_monitor()553 ///////////////////////////////////////////////////////////////////////554 {555 m_monitor_ok = false;556 }557 558 ////////////////////////////////////////////////559 tmpl(void) ::check_monitor( addr_t addr,560 data_t data,561 bool read )562 ////////////////////////////////////////////////563 {564 if((addr >= m_monitor_base) and565 (addr < m_monitor_base + m_monitor_length))566 {567 if ( read ) std::cout << " Monitor MEMC Read ";568 else std::cout << " Monitor MEMC Write";569 std::cout << " / Address = " << std::hex << addr570 << " / Data = " << data571 << " at cycle " << std::dec << m_cpt_cycles << std::endl;572 }573 }574 542 575 543 ///////////////////////////////////////////////////// … … 581 549 DirectoryEntry entry = m_cache_directory.read_neutral(addr, &way, &set ); 582 550 551 // read data and compute data_change 583 552 bool data_change = false; 584 585 553 if ( entry.valid ) 586 554 { 587 m_cache_data.read_line( way, set, m_debug_data );588 589 for ( size_t i = 0 ; i<m_words ; i++ )590 {591 if ( m_debug_previous_valid and592 (m_debug_data[i].read() != m_debug_previous_data[i].read()) )593 594 m_debug_previous_data[i] = m_debug_data[i].read();555 for ( size_t word = 0 ; word<m_words ; word++ ) 556 { 557 m_debug_data[word] = m_cache_data.read(way, set, word); 558 if ( m_debug_previous_valid and 559 (m_debug_data[word] != m_debug_previous_data[word]) ) 560 { 561 data_change = true; 562 } 595 563 } 596 564 } 597 565 566 // print values if any change 598 567 if ( (entry.valid != m_debug_previous_valid) or 599 568 (entry.valid and (entry.count != m_debug_previous_count)) or … … 603 572 << " at cycle " << std::dec << m_cpt_cycles 604 573 << " for address " << std::hex << addr 605 << " / HIT= " << std::dec << entry.valid574 << " / VAL = " << std::dec << entry.valid 606 575 << " / WAY = " << way 607 576 << " / COUNT = " << entry.count 608 577 << " / DIRTY = " << entry.dirty 609 << " / DATA_CHANGE = " << entry.count578 << " / DATA_CHANGE 
= " << data_change 610 579 << std::endl; 611 } 580 std::cout << std::hex << " /0:" << m_debug_data[0] 581 << "/1:" << m_debug_data[1] 582 << "/2:" << m_debug_data[2] 583 << "/3:" << m_debug_data[3] 584 << "/4:" << m_debug_data[4] 585 << "/5:" << m_debug_data[5] 586 << "/6:" << m_debug_data[6] 587 << "/7:" << m_debug_data[7] 588 << "/8:" << m_debug_data[8] 589 << "/9:" << m_debug_data[9] 590 << "/A:" << m_debug_data[10] 591 << "/B:" << m_debug_data[11] 592 << "/C:" << m_debug_data[12] 593 << "/D:" << m_debug_data[13] 594 << "/E:" << m_debug_data[14] 595 << "/F:" << m_debug_data[15] 596 << std::endl; 597 } 598 599 // register values 612 600 m_debug_previous_count = entry.count; 613 601 m_debug_previous_valid = entry.valid; 614 602 m_debug_previous_dirty = entry.dirty; 603 for( size_t word=0 ; word<m_words ; word++ ) 604 m_debug_previous_data[word] = m_debug_data[word]; 615 605 } 616 606 … … 677 667 delete [] r_xram_rsp_victim_data; 678 668 delete [] r_xram_rsp_to_tgt_rsp_data; 679 delete [] r_xram_rsp_to_ixr_cmd_data;680 669 681 670 delete [] r_read_data; … … 755 744 m_config_to_cc_send_inst_fifo.init(); 756 745 m_config_to_cc_send_srcid_fifo.init(); 757 #if L1_MULTI_CACHE758 m_config_to_cc_send_cache_id_fifo.init();759 #endif760 746 761 747 r_tgt_cmd_to_tgt_rsp_req = false; … … 772 758 m_write_to_cc_send_inst_fifo.init(); 773 759 m_write_to_cc_send_srcid_fifo.init(); 774 #if L1_MULTI_CACHE775 m_write_to_cc_send_cache_id_fifo.init();776 #endif777 760 778 761 r_cleanup_to_tgt_rsp_req = false; … … 780 763 m_cc_receive_to_cleanup_fifo.init(); 781 764 782 r_multi_ack_to_tgt_rsp_req 765 r_multi_ack_to_tgt_rsp_req = false; 783 766 784 767 m_cc_receive_to_multi_ack_fifo.init(); … … 788 771 r_cas_lfsr = -1 ; 789 772 r_cas_to_ixr_cmd_req = false; 790 r_cas_to_cc_send_multi_req = false;791 r_cas_to_cc_send_brdcast_req = false;773 r_cas_to_cc_send_multi_req = false; 774 r_cas_to_cc_send_brdcast_req = false; 792 775 793 776 m_cas_to_cc_send_inst_fifo.init(); 794 777 m_cas_to_cc_send_srcid_fifo.init(); 795 #if L1_MULTI_CACHE796 m_cas_to_cc_send_cache_id_fifo.init();797 #endif798 778 799 779 for(size_t i=0; i<m_trt_lines ; i++) … … 810 790 m_xram_rsp_to_cc_send_inst_fifo.init(); 811 791 m_xram_rsp_to_cc_send_srcid_fifo.init(); 812 #if L1_MULTI_CACHE 813 m_xram_rsp_to_cc_send_cache_id_fifo.init(); 814 #endif 815 816 r_ixr_cmd_cpt = 0; 792 817 793 r_alloc_dir_reset_cpt = 0; 818 794 r_alloc_heap_reset_cpt = 0; … … 863 839 size_t write_to_cc_send_fifo_srcid = 0; 864 840 865 #if L1_MULTI_CACHE866 size_t write_to_cc_send_fifo_cache_id = 0;867 #endif868 869 841 bool xram_rsp_to_cc_send_fifo_put = false; 870 842 bool xram_rsp_to_cc_send_fifo_get = false; … … 872 844 size_t xram_rsp_to_cc_send_fifo_srcid = 0; 873 845 874 #if L1_MULTI_CACHE875 size_t xram_rsp_to_cc_send_fifo_cache_id = 0;876 #endif877 878 846 bool config_to_cc_send_fifo_put = false; 879 847 bool config_to_cc_send_fifo_get = false; … … 885 853 bool cas_to_cc_send_fifo_inst = false; 886 854 size_t cas_to_cc_send_fifo_srcid = 0; 887 888 #if L1_MULTI_CACHE889 size_t cas_to_cc_send_fifo_cache_id = 0;890 #endif891 855 892 856 m_debug = (m_cpt_cycles > m_debug_start_cycle) and m_debug_ok; … … 939 903 // - For MEMC_CMD_TYPE, the response is delayed until the operation is completed. 
940 904 //////////////////////////////////////////////////////////////////////////////////// 905 906 //std::cout << std::endl << "tgt_cmd_fsm" << std::endl; 941 907 942 908 switch(r_tgt_cmd_fsm.read()) … … 1042 1008 case TGT_CMD_ERROR: // response error must be sent 1043 1009 1044 // wait if pending TGT_CMD request to TGT_RSP FSM1010 // wait if pending request 1045 1011 if(r_tgt_cmd_to_tgt_rsp_req.read()) break; 1046 1012 … … 1076 1042 size_t error; 1077 1043 uint32_t rdata = 0; // default value 1044 uint32_t wdata = p_vci_tgt.wdata.read(); 1078 1045 1079 1046 if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_READ) // get lock … … 1084 1051 error = 0; 1085 1052 r_config_lock = true; 1053 if ( rdata == 0 ) 1054 { 1055 r_tgt_cmd_srcid = p_vci_tgt.srcid.read(); 1056 r_tgt_cmd_trdid = p_vci_tgt.trdid.read(); 1057 r_tgt_cmd_pktid = p_vci_tgt.pktid.read(); 1058 } 1086 1059 } 1087 1060 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // release lock 1088 and (cell == MEMC_LOCK) ) 1061 and (cell == MEMC_LOCK) 1062 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1089 1063 { 1090 1064 need_rsp = true; … … 1093 1067 } 1094 1068 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_lo 1095 and (cell == MEMC_ADDR_LO) ) 1096 { 1069 and (cell == MEMC_ADDR_LO) 1070 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1071 { 1072 assert( ((wdata % (m_words*vci_param_int::B)) == 0) and 1073 "VCI_MEM_CACHE CONFIG ERROR: The buffer must be aligned on a cache line"); 1074 1097 1075 need_rsp = true; 1098 1076 error = 0; … … 1101 1079 } 1102 1080 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set addr_hi 1103 and (cell == MEMC_ADDR_HI) ) 1081 and (cell == MEMC_ADDR_HI) 1082 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1083 1104 1084 { 1105 1085 need_rsp = true; … … 1109 1089 } 1110 1090 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set buf_lines 1111 and (cell == MEMC_BUF_LENGTH) ) 1091 and (cell == MEMC_BUF_LENGTH) 1092 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1112 1093 { 1113 1094 need_rsp = true; 1114 1095 error = 0; 1115 1096 size_t lines = (size_t)(p_vci_tgt.wdata.read()/(m_words<<2)); 1116 if ( r_config_address.read()/(m_words*vci_param_int::B) ) lines++; 1117 r_config_nlines = lines; 1097 if ( r_config_address.read()%(m_words*4) ) lines++; 1098 r_config_cmd_lines = lines; 1099 r_config_rsp_lines = lines; 1118 1100 } 1119 1101 else if ( (p_vci_tgt.cmd.read() == vci_param_int::CMD_WRITE) // set cmd type 1120 and (cell == MEMC_CMD_TYPE) ) 1102 and (cell == MEMC_CMD_TYPE) 1103 and (p_vci_tgt.srcid.read() == r_tgt_cmd_srcid.read()) ) 1121 1104 { 1122 1105 need_rsp = false; 1123 1106 error = 0; 1124 1107 r_config_cmd = p_vci_tgt.wdata.read(); 1108 1109 // prepare delayed response from CONFIG FSM 1125 1110 r_config_srcid = p_vci_tgt.srcid.read(); 1126 1111 r_config_trdid = p_vci_tgt.trdid.read(); … … 1153 1138 << " address = " << std::hex << p_vci_tgt.address.read() 1154 1139 << " / wdata = " << p_vci_tgt.wdata.read() 1140 << " / need_rsp = " << need_rsp 1155 1141 << " / error = " << error << std::endl; 1156 1142 #endif … … 1256 1242 // MULTI_ACK FSM 1257 1243 ///////////////////////////////////////////////////////////////////////// 1258 // This FSM controls the response to the multicast update or multicast 1259 // inval coherence requests sent by the memory cache to the L1 caches and 1260 // update the UPT. 
1244 // This FSM controls the response to the multicast update requests sent 1245 // by the memory cache to the L1 caches and update the UPT. 1261 1246 // 1262 1247 // - The FSM decrements the proper entry in UPT, … … 1264 1249 // - If required, it sends a request to the TGT_RSP FSM to complete 1265 1250 // a pending write transaction. 1266 // - If required, it sends an acknowledge to the CONFIG FSM to signal1267 // completion of a line inval.1268 1251 // 1269 1252 // All those multi-ack packets are one flit packet. 1270 // The index in the UPT is defined in the UPDTID field.1253 // The index in the UPT is defined in the TRDID field. 1271 1254 //////////////////////////////////////////////////////////////////////// 1255 1256 //std::cout << std::endl << "multi_ack_fsm" << std::endl; 1272 1257 1273 1258 switch(r_multi_ack_fsm.read()) … … 1381 1366 r_multi_ack_nline = m_upt.nline(r_multi_ack_upt_index.read()); 1382 1367 bool need_rsp = m_upt.need_rsp(r_multi_ack_upt_index.read()); 1383 bool need_ack = m_upt.need_ack(r_multi_ack_upt_index.read());1384 1368 1385 1369 // clear the UPT entry … … 1387 1371 1388 1372 if ( need_rsp ) r_multi_ack_fsm = MULTI_ACK_WRITE_RSP; 1389 else if ( need_ack ) r_multi_ack_fsm = MULTI_ACK_CONFIG_ACK;1390 1373 else r_multi_ack_fsm = MULTI_ACK_IDLE; 1391 1374 … … 1418 1401 break; 1419 1402 } 1420 //////////////////////////1421 case MULTI_ACK_CONFIG_ACK: // Signals multi-inval completion to CONFIG FSM1422 // Wait if pending request1423 {1424 if ( r_multi_ack_to_config_ack.read() ) break;1425 1426 r_multi_ack_to_config_ack = true;1427 r_multi_ack_fsm = MULTI_ACK_IDLE;1428 1429 #if DEBUG_MEMC_MULTI_ACK1430 if(m_debug)1431 std::cout << " <MEMC " << name() << " MULTI_ACK_CONFIG_ACK>"1432 << " Signals inval completion to CONFIG FSM" << std::endl;1433 #endif1434 break;1435 }1436 1403 } // end switch r_multi_ack_fsm 1437 1404 … … 1441 1408 // The CONFIG FSM handles the VCI configuration requests (INVAL & SYNC). 1442 1409 // The target buffer can have any size, and there is one single command for 1443 // all cache lines covered by the target buffer. 1444 // An INVAL or SYNC configuration request is defined by the followinf registers: 1445 // - bool r_config_cmd : INVAL / SYNC / NOP) 1410 // all cache lines covered by the target buffer. 1411 // 1412 // An INVAL or SYNC configuration operation is defined by the following registers: 1413 // - bool r_config_cmd : INVAL / SYNC / NOP 1446 1414 // - uint64_t r_config_address : buffer base address 1447 // - uint32_t r_config_nlines : number of lines covering buffer 1415 // - uint32_t r_config_cmd_lines : number of lines to be handled 1416 // - uint32_t r_config_rsp_lines : number of lines not completed 1448 1417 // 1449 1418 // For both INVAL and SYNC commands, the CONFIG FSM contains the loop handling 1450 // all cache lines covered by the target buffer. 1451 // 1419 // all cache lines covered by the buffer. The various lines of a given buffer 1420 // can be pipelined: the CONFIG FSM does not wait the response for line (n) to send 1421 // the command for line (n+1). It decrements the r_config_cmd_lines counter until 1422 // the last request has been registered in TRT (for a SYNC), or in IVT (for an INVAL). 1423 // 1452 1424 // - INVAL request: 1453 // For each line, it access to the DIR array.1425 // For each line, it access to the DIR. 1454 1426 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 
1455 1427 // In case of hit, with no copies in L1 caches, the line is invalidated and 1456 1428 // a response is requested to TGT_RSP FSM. 1457 1429 // If there is copies, a multi-inval, or a broadcast-inval coherence transaction 1458 // is launched and registered in UPT. The multi-inval transaction is signaled 1459 // by the r_multi_ack_to config_ack or r_cleanup_to_config_ack flip-flops. 1460 // The config inval response is sent only when the last line has been invalidated. 1461 // 1430 // is launched and registered in UPT. The multi-inval transaction completion 1431 // is signaled by the CLEANUP FSM by decrementing the r_config_rsp_lines counter. 1432 // The CONFIG INVAL response is sent only when the last line has been invalidated. 1433 // TODO : The target buffer address must be aligned on a cache line boundary. 1434 // This constraint can be released, but it requires to make 2 PUT transactions 1435 // for the first and the last line... 1436 // 1462 1437 // - SYNC request: 1463 // 1464 // ... Not implemented yet ... 1438 // For each line, it access to the DIR. 1439 // In case of miss, it does nothing, and a response is requested to TGT_RSP FSM. 1440 // In case of hit, a PUT transaction is registered in TRT and a request is sent 1441 // to IXR_CMD FSM. The IXR_RSP FSM decrements the r_config_rsp_lines counter 1442 // when a PUT response is received. 1443 // The CONFIG SYNC response is sent only when the last PUT response is received. 1465 1444 // 1466 1445 // From the software point of view, a configuration request is a sequence 1467 // of 6 atomic accesses in an uncached segment: 1446 // of 6 atomic accesses in an uncached segment. A dedicated lock is used 1447 // to handle only one configuration command at a given time: 1468 1448 // - Read MEMC_LOCK : Get the lock 1469 1449 // - Write MEMC_ADDR_LO : Set the buffer address LSB … … 1474 1454 //////////////////////////////////////////////////////////////////////////////////// 1475 1455 1456 //std::cout << std::endl << "config_fsm" << std::endl; 1457 1476 1458 switch( r_config_fsm.read() ) 1477 1459 { … … 1486 1468 if(m_debug) 1487 1469 std::cout << " <MEMC " << name() << " CONFIG_IDLE> Config Request received" 1488 << " address = " << std::hex << r_config_address.read()1489 << " / nlines = " << std::dec << r_config_nlines.read()1470 << " / address = " << std::hex << r_config_address.read() 1471 << " / lines = " << std::dec << r_config_cmd_lines.read() 1490 1472 << " / type = " << r_config_cmd.read() << std::endl; 1491 1473 #endif … … 1494 1476 } 1495 1477 ///////////////// 1496 case CONFIG_LOOP: // test last line1497 { 1498 if ( r_config_ nlines.read() == 0 )1478 case CONFIG_LOOP: // test if last line to be handled 1479 { 1480 if ( r_config_cmd_lines.read() == 0 ) 1499 1481 { 1500 1482 r_config_cmd = MEMC_CMD_NOP; 1501 r_config_fsm = CONFIG_ RSP;1483 r_config_fsm = CONFIG_WAIT; 1502 1484 } 1503 1485 else … … 1509 1491 if(m_debug) 1510 1492 std::cout << " <MEMC " << name() << " CONFIG_LOOP>" 1511 << " address = " << std::hex << r_config_address.read()1512 << " / nlines = " << std::dec << r_config_nlines.read()1493 << " / address = " << std::hex << r_config_address.read() 1494 << " / lines not handled = " << std::dec << r_config_cmd_lines.read() 1513 1495 << " / command = " << r_config_cmd.read() << std::endl; 1514 1496 #endif 1515 1497 break; 1498 } 1499 ///////////////// 1500 case CONFIG_WAIT: // wait completion (last response) 1501 { 1502 if ( r_config_rsp_lines.read() == 0 ) // last response received 1503 { 1504 r_config_fsm = 
CONFIG_RSP; 1505 } 1506 1507 #if DEBUG_MEMC_CONFIG 1508 if(m_debug) 1509 std::cout << " <MEMC " << name() << " CONFIG_WAIT>" 1510 << " / lines to do = " << std::dec << r_config_rsp_lines.read() << std::endl; 1511 #endif 1512 break; 1513 } 1514 //////////////// 1515 case CONFIG_RSP: // request TGT_RSP FSM to return response 1516 { 1517 if ( not r_config_to_tgt_rsp_req.read() ) 1518 { 1519 r_config_to_tgt_rsp_srcid = r_config_srcid.read(); 1520 r_config_to_tgt_rsp_trdid = r_config_trdid.read(); 1521 r_config_to_tgt_rsp_pktid = r_config_pktid.read(); 1522 r_config_to_tgt_rsp_error = false; 1523 r_config_to_tgt_rsp_req = true; 1524 r_config_fsm = CONFIG_IDLE; 1525 1526 #if DEBUG_MEMC_CONFIG 1527 if(m_debug) 1528 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:" 1529 << " error = " << r_config_to_tgt_rsp_error.read() 1530 << " / rsrcid = " << std::hex << r_config_srcid.read() 1531 << " / rtrdid = " << std::hex << r_config_trdid.read() 1532 << " / rpktid = " << std::hex << r_config_pktid.read() << std::endl; 1533 #endif 1534 } 1535 break; 1536 1516 1537 } 1517 1538 //////////////////// … … 1533 1554 case CONFIG_DIR_ACCESS: // Access directory and decode config command 1534 1555 { 1556 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1557 "MEMC ERROR in CONFIG_DIR_ACCESS state: bad DIR allocation"); 1558 1535 1559 size_t way = 0; 1536 1560 DirectoryEntry entry = m_cache_directory.read(r_config_address.read(), way); … … 1543 1567 r_config_dir_copy_srcid = entry.owner.srcid; 1544 1568 r_config_dir_is_cnt = entry.is_cnt; 1569 r_config_dir_lock = entry.lock; 1545 1570 r_config_dir_count = entry.count; 1546 r_config_dir_ next_ptr= entry.ptr;1547 1548 r_config_fsm = CONFIG_ DIR_IVT_LOCK;1571 r_config_dir_ptr = entry.ptr; 1572 1573 r_config_fsm = CONFIG_IVT_LOCK; 1549 1574 } 1550 1575 else if ( entry.valid and // hit & sync command … … 1552 1577 (r_config_cmd.read() == MEMC_CMD_SYNC) ) 1553 1578 { 1554 std::cout << "VCI_MEM_CACHE ERROR: " 1555 << "SYNC config request not implemented yet" << std::endl; 1556 exit(0); 1579 r_config_fsm = CONFIG_TRT_LOCK; 1557 1580 } 1558 else // return to LOOP1581 else // miss : return to LOOP 1559 1582 { 1560 r_config_nlines = r_config_nlines.read() - 1; 1561 r_config_address = r_config_address.read() + (m_words<<2); 1562 r_config_fsm = CONFIG_LOOP; 1583 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1584 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1585 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1586 r_config_address = r_config_address.read() + (m_words<<2); 1587 r_config_fsm = CONFIG_LOOP; 1563 1588 } 1564 1589 … … 1574 1599 break; 1575 1600 } 1576 ///////////////////////// 1577 case CONFIG_DIR_IVT_LOCK: // enter this state in case of INVAL command 1578 // Try to get both DIR & IVT locks, and return 1579 // to LOOP state if IVT full. 1580 // Register inval in IVT, and invalidate the 1581 // directory if IVT not full. 1582 { 1601 ///////////////////// 1602 case CONFIG_TRT_LOCK: // enter this state in case of SYNC command 1603 // to a dirty cache line 1604 // keep DIR lock, and try to get TRT lock 1605 // return to LOOP state if TRT full 1606 // reset dirty bit in DIR and register a PUT 1607 // trabsaction in TRT if not full. 
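The software view described in the header above (one lock acquisition followed by the configuration writes) can be sketched as a small driver routine. The register word offsets and the function name are assumptions for illustration; the relative order of the MEMC_CMD_TYPE write and the lock release is also an assumption, but since the response to the MEMC_CMD_TYPE write is delayed until the operation completes, the lock can safely be released right after it.

#include <cstdint>

enum MemcConfigRegs            // assumed word offsets in the uncached segment
{
    MEMC_LOCK       = 0,
    MEMC_ADDR_LO    = 1,
    MEMC_ADDR_HI    = 2,
    MEMC_BUF_LENGTH = 3,
    MEMC_CMD_TYPE   = 4
};

// memc must point to the memory-mapped configuration segment of the target
// memory cache; cmd is the INVAL or SYNC command code.
void memc_config(volatile uint32_t *memc, uint64_t buf_base,
                 uint32_t buf_length, uint32_t cmd)
{
    while (memc[MEMC_LOCK] != 0)     // the read returns 0 when the lock is granted
        ;
    memc[MEMC_ADDR_LO]    = (uint32_t) buf_base;
    memc[MEMC_ADDR_HI]    = (uint32_t)(buf_base >> 32);
    memc[MEMC_BUF_LENGTH] = buf_length;
    memc[MEMC_CMD_TYPE]   = cmd;     // this write is answered only when all lines are done
    memc[MEMC_LOCK]       = 0;       // release the lock
}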
1608 { 1609 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1610 "MEMC ERROR in CONFIG_TRT_LOCK state: bad DIR allocation"); 1611 1612 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG ) 1613 { 1614 size_t index = 0; 1615 bool wok = not m_trt.full(index); 1616 1617 if ( not wok ) 1618 { 1619 r_config_fsm = CONFIG_LOOP; 1620 } 1621 else 1622 { 1623 size_t way = r_config_dir_way.read(); 1624 size_t set = m_y[r_config_address.read()]; 1625 1626 // reset dirty bit in DIR 1627 DirectoryEntry entry; 1628 entry.valid = true; 1629 entry.dirty = false; 1630 entry.tag = m_z[r_config_address.read()]; 1631 entry.is_cnt = r_config_dir_is_cnt.read(); 1632 entry.lock = r_config_dir_lock.read(); 1633 entry.ptr = r_config_dir_ptr.read(); 1634 entry.count = r_config_dir_count.read(); 1635 entry.owner.inst = r_config_dir_copy_inst.read(); 1636 entry.owner.srcid = r_config_dir_copy_srcid.read(); 1637 m_cache_directory.write( set, 1638 way, 1639 entry ); 1640 1641 r_config_trt_index = index; 1642 r_config_fsm = CONFIG_TRT_SET; 1643 } 1644 1645 #if DEBUG_MEMC_CONFIG 1646 if(m_debug) 1647 std::cout << " <MEMC " << name() << " CONFIG_TRT_LOCK> Access TRT: " 1648 << " wok = " << std::dec << wok 1649 << " index = " << index << std::endl; 1650 #endif 1651 } 1652 break; 1653 } 1654 //////////////////// 1655 case CONFIG_TRT_SET: // read data in cache 1656 // and post a PUT request in TRT 1657 { 1658 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1659 "MEMC ERROR in CONFIG_TRT_SET state: bad DIR allocation"); 1660 1661 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CONFIG) and 1662 "MEMC ERROR in CONFIG_TRT_SET state: bad TRT allocation"); 1663 1664 // read data into cache 1665 size_t way = r_config_dir_way.read(); 1666 size_t set = m_y[r_config_address.read()]; 1667 1668 sc_signal<data_t> config_data[16]; 1669 m_cache_data.read_line( way, 1670 set, 1671 config_data ); 1672 1673 // post a PUT request in TRT 1674 std::vector<data_t> data_vector; 1675 data_vector.clear(); 1676 for(size_t i=0; i<m_words; i++) data_vector.push_back(config_data[i].read()); 1677 m_trt.set( r_config_trt_index.read(), 1678 false, // PUT 1679 m_nline[r_config_address.read()], // nline 1680 0, // srcid: unused 1681 0, // trdid: unused 1682 0, // pktid: unused 1683 false, // not proc_read 1684 0, // read_length: unused 1685 0, // word_index: unused 1686 std::vector<be_t>(m_words,0xF), 1687 data_vector); 1688 1689 #if DEBUG_MEMC_CONFIG 1690 if(m_debug) 1691 std::cout << " <MEMC " << name() << " CONFIG_TRT_SET> PUT request in TRT:" 1692 << " address = " << std::hex << r_config_address.read() 1693 << " index = " << std::dec << r_config_trt_index.read() << std::endl; 1694 #endif 1695 break; 1696 } 1697 //////////////////// 1698 case CONFIG_PUT_REQ: // PUT request to IXR_CMD_FSM 1699 { 1700 if ( not r_config_to_ixr_cmd_req.read() ) 1701 { 1702 r_config_to_ixr_cmd_req = true; 1703 r_config_to_ixr_cmd_index = r_config_trt_index.read(); 1704 1705 // prepare next iteration 1706 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1707 r_config_address = r_config_address.read() + (m_words<<2); 1708 r_config_fsm = CONFIG_LOOP; 1709 1710 #if DEBUG_MEMC_CONFIG 1711 if(m_debug) 1712 std::cout << " <MEMC " << name() << " CONFIG_PUT_REQ> PUT request to IXR_CMD_FSM" 1713 << " / address = " << std::hex << r_config_address.read() << std::endl; 1714 #endif 1715 } 1716 break; 1717 } 1718 ///////////////////// 1719 case CONFIG_IVT_LOCK: // enter this state in case of INVAL command 1720 // Keep DIR lock and Try to get IVT lock. 
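Putting together the CONFIG_DIR_ACCESS decoding and the TRT path shown above, the per-line decision can be restated as follows (an illustrative summary, not the component code):

#include <cstdio>

enum Cmd  { CMD_INVAL, CMD_SYNC };
enum Next { GO_IVT_LOCK, GO_TRT_LOCK, SKIP_LINE };

static Next decode(bool valid, bool dirty, Cmd cmd)
{
    if (valid && cmd == CMD_INVAL)          return GO_IVT_LOCK;  // invalidate the copies
    if (valid && dirty && cmd == CMD_SYNC)  return GO_TRT_LOCK;  // write the line back to XRAM
    return SKIP_LINE;                                            // miss, or nothing to write back
}

int main()
{
    printf("%d\n", decode(true,  true,  CMD_SYNC));   // dirty hit on SYNC: PUT via TRT
    printf("%d\n", decode(false, false, CMD_INVAL));  // miss: go to the next line
    return 0;
}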
1721 // Return to LOOP state if IVT full. 1722 // Register inval in IVT, and invalidate the 1723 // directory if IVT not full. 1724 { 1725 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CONFIG) and 1726 "MEMC ERROR in CONFIG_IVT_LOCK state: bad DIR allocation"); 1727 1583 1728 if ( r_alloc_ivt_fsm.read() == ALLOC_IVT_CONFIG ) 1584 1729 { … … 1589 1734 { 1590 1735 m_cache_directory.inval( way, set ); 1591 r_config_nlines = r_config_nlines.read() - 1; 1592 r_config_address = r_config_address.read() + (m_words<<2); 1593 r_config_fsm = CONFIG_LOOP; 1736 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1737 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 1738 r_config_address = r_config_address.read() + (m_words<<2); 1739 r_config_fsm = CONFIG_LOOP; 1594 1740 1595 1741 #if DEBUG_MEMC_CONFIG 1596 1742 if(m_debug) 1597 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1743 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1598 1744 << " No copies in L1 : inval DIR entry" << std::endl; 1599 1745 #endif … … 1626 1772 r_config_ivt_index = index; 1627 1773 if ( broadcast ) r_config_fsm = CONFIG_BC_SEND; 1628 else r_config_fsm = CONFIG_INV _SEND;1774 else r_config_fsm = CONFIG_INVAL_SEND; 1629 1775 1630 1776 #if DEBUG_MEMC_CONFIG 1631 1777 if(m_debug) 1632 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1778 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1633 1779 << " Inval DIR entry and register inval in IVT" 1634 << " :index = " << std::dec << index1780 << " / index = " << std::dec << index 1635 1781 << " / broadcast = " << broadcast << std::endl; 1636 1782 #endif … … 1642 1788 #if DEBUG_MEMC_CONFIG 1643 1789 if(m_debug) 1644 std::cout << " <MEMC " << name() << " CONFIG_ DIR_IVT_LOCK>"1790 std::cout << " <MEMC " << name() << " CONFIG_IVT_LOCK>" 1645 1791 << " IVT full : release DIR & IVT locks and retry" << std::endl; 1646 1792 #endif … … 1656 1802 not r_config_to_cc_send_brdcast_req.read() ) 1657 1803 { 1804 // post bc inval request 1658 1805 r_config_to_cc_send_multi_req = false; 1659 1806 r_config_to_cc_send_brdcast_req = true; 1660 1807 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1661 1808 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1662 r_cleanup_to_config_ack = false; 1663 r_config_fsm = CONFIG_BC_WAIT; 1809 1810 // prepare next iteration 1811 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1812 r_config_address = r_config_address.read() + (m_words<<2); 1813 r_config_fsm = CONFIG_LOOP; 1664 1814 1665 1815 #if DEBUG_MEMC_CONFIG … … 1672 1822 break; 1673 1823 } 1674 //////////////////// 1675 case CONFIG_BC_WAIT: // wait broadcast completion to return to LOOP 1676 { 1677 if ( r_cleanup_to_config_ack.read() ) 1678 { 1679 r_config_fsm = CONFIG_LOOP; 1680 r_config_nlines = r_config_nlines.read() - 1; 1681 r_config_address = r_config_address.read() + (m_words<<2); 1682 } 1683 1684 #if DEBUG_MEMC_CONFIG 1685 if(m_debug) 1686 std::cout << " <MEMC " << name() << " CONFIG_BC_WAIT> Waiting BC completion " 1687 << " done = " << r_cleanup_to_config_ack.read() 1688 << std::endl; 1689 #endif 1690 break; 1691 } 1692 ///////////////////// 1693 case CONFIG_INV_SEND: // Post a multi inval request to CC_SEND FSM 1824 /////////////////////// 1825 case CONFIG_INVAL_SEND: // Post a multi inval request to CC_SEND FSM 1694 1826 { 1695 1827 if( not r_config_to_cc_send_multi_req.read() and 1696 1828 not r_config_to_cc_send_brdcast_req.read() ) 1697 1829 { 1830 // post multi inval request 1698 1831 r_config_to_cc_send_multi_req = true; 
1699 1832 r_config_to_cc_send_brdcast_req = false; 1700 1833 r_config_to_cc_send_trdid = r_config_ivt_index.read(); 1701 1834 r_config_to_cc_send_nline = m_nline[(addr_t)(r_config_address.read())]; 1702 r_multi_ack_to_config_ack = false; 1703 1835 1836 // post data into FIFO 1704 1837 config_to_cc_send_fifo_srcid = r_config_dir_copy_srcid.read(); 1705 1838 config_to_cc_send_fifo_inst = r_config_dir_copy_inst.read(); 1706 1839 config_to_cc_send_fifo_put = true; 1707 1840 1708 if ( r_config_dir_count.read() == 1 ) r_config_fsm = CONFIG_INV_WAIT; 1709 else r_config_fsm = CONFIG_HEAP_REQ; 1841 if ( r_config_dir_count.read() == 1 ) // one copy 1842 { 1843 // prepare next iteration 1844 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1845 r_config_address = r_config_address.read() + (m_words<<2); 1846 r_config_fsm = CONFIG_LOOP; 1847 } 1848 else // several copies 1849 { 1850 r_config_fsm = CONFIG_HEAP_REQ; 1851 } 1710 1852 1711 1853 #if DEBUG_MEMC_CONFIG 1712 1854 if(m_debug) 1713 std::cout << " <MEMC " << name() << " CONFIG_INV _SEND>"1855 std::cout << " <MEMC " << name() << " CONFIG_INVAL_SEND>" 1714 1856 << " Post multi inval request to CC_SEND FSM" 1715 1857 << " / address = " << std::hex << r_config_address.read() … … 1726 1868 { 1727 1869 r_config_fsm = CONFIG_HEAP_SCAN; 1728 r_config_heap_next = r_config_dir_ next_ptr.read();1870 r_config_heap_next = r_config_dir_ptr.read(); 1729 1871 } 1730 1872 … … 1773 1915 if ( m_heap.is_full() ) 1774 1916 { 1775 last_entry.next = r_config_dir_ next_ptr.read();1917 last_entry.next = r_config_dir_ptr.read(); 1776 1918 m_heap.unset_full(); 1777 1919 } … … 1781 1923 } 1782 1924 1783 m_heap.write_free_ptr( r_config_dir_ next_ptr.read() );1925 m_heap.write_free_ptr( r_config_dir_ptr.read() ); 1784 1926 m_heap.write( r_config_heap_next.read(), last_entry ); 1785 r_config_fsm = CONFIG_INV_WAIT; 1927 1928 // prepare next iteration 1929 r_config_cmd_lines = r_config_cmd_lines.read() - 1; 1930 r_config_address = r_config_address.read() + (m_words<<2); 1931 r_config_fsm = CONFIG_LOOP; 1786 1932 1787 1933 #if DEBUG_MEMC_CONFIG … … 1791 1937 #endif 1792 1938 break; 1793 }1794 /////////////////////1795 case CONFIG_INV_WAIT: // wait inval completion to return to LOOP1796 {1797 if ( r_multi_ack_to_config_ack.read() )1798 {1799 r_config_fsm = CONFIG_LOOP;1800 r_config_nlines = r_config_nlines.read() - 1;1801 r_config_address = r_config_address.read() + (m_words<<2);1802 }1803 1804 #if DEBUG_MEMC_CONFIG1805 if(m_debug)1806 std::cout << " <MEMC " << name() << " CONFIG_INV_WAIT> Waiting inval completion "1807 << " done = " << r_multi_ack_to_config_ack.read()1808 << std::endl;1809 #endif1810 break;1811 }1812 1813 ////////////////1814 case CONFIG_RSP: // request TGT_RSP FSM to return response1815 {1816 if ( not r_config_to_tgt_rsp_req.read() )1817 {1818 r_config_to_tgt_rsp_srcid = r_config_srcid.read();1819 r_config_to_tgt_rsp_trdid = r_config_trdid.read();1820 r_config_to_tgt_rsp_pktid = r_config_pktid.read();1821 r_config_to_tgt_rsp_error = false;1822 r_config_to_tgt_rsp_req = true;1823 r_config_fsm = CONFIG_IDLE;1824 1825 #if DEBUG_MEMC_CONFIG1826 if(m_debug)1827 std::cout << " <MEMC " << name() << " CONFIG_RSP> Request TGT_RSP FSM to return response:"1828 << " error = " << r_config_to_tgt_rsp_error.read()1829 << " / rsrcid = " << std::hex << r_config_srcid.read() << std::endl;1830 #endif1831 }1832 break;1833 1834 1939 } 1835 1940 } // end switch r_config_fsm … … 1858 1963 //////////////////////////////////////////////////////////////////////////////////// 1859 1964 
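Before the READ FSM, a short standalone model of the copies list walked by the CONFIG_HEAP_SCAN state above: the directory entry holds the first copy and a pointer into the heap, and the remaining copies are chained through next pointers. The structure layout and the termination convention (the last entry pointing to itself) are assumptions made for illustration only.

#include <cstddef>
#include <cstdio>

struct HeapEntryModel { size_t srcid; bool inst; size_t next; };

int main()
{
    // line owned by srcid 2, with two extra copies chained in the heap
    HeapEntryModel heap[4] = { {5, false, 3}, {0, false, 0}, {0, false, 0}, {7, true, 3} };
    size_t first_copy_srcid = 2;
    size_t ptr = 0;                       // directory pointer to the first heap entry

    printf("inval copy srcid=%zu\n", first_copy_srcid);
    size_t cur = ptr;
    while (true)                          // assumed convention: last entry points to itself
    {
        printf("inval copy srcid=%zu inst=%d\n", heap[cur].srcid, heap[cur].inst);
        if (heap[cur].next == cur) break;
        cur = heap[cur].next;
    }
    return 0;
}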
1965 //std::cout << std::endl << "read_fsm" << std::endl; 1966 1860 1967 switch(r_read_fsm.read()) 1861 1968 { … … 1863 1970 case READ_IDLE: // waiting a read request 1864 1971 { 1865 if(m_cmd_read_addr_fifo.rok())1866 {1972 if(m_cmd_read_addr_fifo.rok()) 1973 { 1867 1974 1868 1975 #if DEBUG_MEMC_READ 1869 if(m_debug) 1870 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 1871 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 1872 << " / srcid = " << m_cmd_read_srcid_fifo.read() 1873 << " / trdid = " << m_cmd_read_trdid_fifo.read() 1874 << " / pktid = " << m_cmd_read_pktid_fifo.read() 1875 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1876 #endif 1877 r_read_fsm = READ_DIR_REQ; 1878 } 1879 break; 1880 } 1881 1976 if(m_debug) 1977 std::cout << " <MEMC " << name() << " READ_IDLE> Read request" 1978 << " : address = " << std::hex << m_cmd_read_addr_fifo.read() 1979 << " / srcid = " << m_cmd_read_srcid_fifo.read() 1980 << " / trdid = " << m_cmd_read_trdid_fifo.read() 1981 << " / pktid = " << m_cmd_read_pktid_fifo.read() 1982 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 1983 #endif 1984 r_read_fsm = READ_DIR_REQ; 1985 } 1986 break; 1987 } 1882 1988 ////////////////// 1883 1989 case READ_DIR_REQ: // Get the lock to the directory 1884 1990 { 1885 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ)1886 {1887 r_read_fsm = READ_DIR_LOCK;1888 }1991 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1992 { 1993 r_read_fsm = READ_DIR_LOCK; 1994 } 1889 1995 1890 1996 #if DEBUG_MEMC_READ … … 1892 1998 std::cout << " <MEMC " << name() << " READ_DIR_REQ> Requesting DIR lock " << std::endl; 1893 1999 #endif 1894 break;2000 break; 1895 2001 } 1896 2002 … … 1898 2004 case READ_DIR_LOCK: // check directory for hit / miss 1899 2005 { 1900 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1901 { 2006 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2007 "MEMC ERROR in READ_DIR_LOCK state: Bad DIR allocation"); 2008 1902 2009 size_t way = 0; 1903 DirectoryEntry entry = 1904 m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2010 DirectoryEntry entry = m_cache_directory.read(m_cmd_read_addr_fifo.read(), way); 2011 1905 2012 // access the global table ONLY when we have an LL cmd 1906 2013 if((m_cmd_read_pktid_fifo.read() & 0x7) == TYPE_LL) 1907 2014 { 1908 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read());2015 r_read_ll_key = m_llsc_table.ll(m_cmd_read_addr_fifo.read()); 1909 2016 } 1910 2017 r_read_is_cnt = entry.is_cnt; … … 1915 2022 r_read_count = entry.count; 1916 2023 r_read_copy = entry.owner.srcid; 1917 1918 #if L1_MULTI_CACHE1919 r_read_copy_cache = entry.owner.cache_id;1920 #endif1921 2024 r_read_copy_inst = entry.owner.inst; 1922 2025 r_read_ptr = entry.ptr; // pointer to the heap … … 1928 2031 if(entry.valid) // hit 1929 2032 { 1930 // test if we need to register a new copy in the heap1931 if(entry.is_cnt or (entry.count == 0) or !cached_read)1932 {1933 r_read_fsm = READ_DIR_HIT;1934 }1935 else1936 {1937 r_read_fsm = READ_HEAP_REQ;1938 }2033 // test if we need to register a new copy in the heap 2034 if(entry.is_cnt or (entry.count == 0) or !cached_read) 2035 { 2036 r_read_fsm = READ_DIR_HIT; 2037 } 2038 else 2039 { 2040 r_read_fsm = READ_HEAP_REQ; 2041 } 1939 2042 } 1940 2043 else // miss 1941 2044 { 1942 r_read_fsm = READ_TRT_LOCK;2045 r_read_fsm = READ_TRT_LOCK; 1943 2046 } 1944 2047 … … 1955 2058 } 1956 2059 #endif 1957 } 1958 else 1959 { 1960 std::cout << "VCI_MEM_CACHE ERROR " << name() << " READ_DIR_LOCK 
state" 1961 << "Bad DIR allocation" << std::endl; 1962 exit(0); 1963 } 1964 break; 1965 } 1966 2060 break; 2061 } 1967 2062 ////////////////// 1968 2063 case READ_DIR_HIT: // read data in cache & update the directory … … 1973 2068 1974 2069 { 1975 if(r_alloc_dir_fsm.read() == ALLOC_DIR_READ) 1976 { 2070 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_READ) and 2071 "MEMC ERROR in READ_DIR_HIT state: Bad DIR allocation"); 2072 1977 2073 // check if this is an instruction read, this means pktid is either 1978 2074 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding … … 1991 2087 m_cache_data.read_line(way, set, r_read_data); 1992 2088 1993 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true);1994 1995 2089 // update the cache directory 1996 2090 DirectoryEntry entry; … … 2004 2098 if(cached_read) // Cached read => we must update the copies 2005 2099 { 2006 if(!is_cnt) // Not counter mode 2007 { 2008 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2009 #if L1_MULTI_CACHE 2010 entry.owner.cache_id = m_cmd_read_pktid_fifo.read(); 2011 #endif 2012 entry.owner.inst = inst_read; 2013 entry.count = r_read_count.read() + 1; 2014 } 2015 else // Counter mode 2016 { 2017 entry.owner.srcid = 0; 2018 #if L1_MULTI_CACHE 2019 entry.owner.cache_id = 0; 2020 #endif 2021 entry.owner.inst = false; 2022 entry.count = r_read_count.read() + 1; 2023 } 2100 if(!is_cnt) // Not counter mode 2101 { 2102 entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2103 entry.owner.inst = inst_read; 2104 entry.count = r_read_count.read() + 1; 2105 } 2106 else // Counter mode 2107 { 2108 entry.owner.srcid = 0; 2109 entry.owner.inst = false; 2110 entry.count = r_read_count.read() + 1; 2111 } 2024 2112 } 2025 2113 else // Uncached read 2026 2114 { 2027 entry.owner.srcid = r_read_copy.read(); 2028 #if L1_MULTI_CACHE 2029 entry.owner.cache_id = r_read_copy_cache.read(); 2030 #endif 2031 entry.owner.inst = r_read_copy_inst.read(); 2032 entry.count = r_read_count.read(); 2115 entry.owner.srcid = r_read_copy.read(); 2116 entry.owner.inst = r_read_copy_inst.read(); 2117 entry.count = r_read_count.read(); 2033 2118 } 2034 2119 2035 2120 #if DEBUG_MEMC_READ 2036 if(m_debug) 2037 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2038 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2039 << " / set = " << std::dec << set 2040 << " / way = " << way 2041 << " / owner_id = " << std::hex << entry.owner.srcid 2042 << " / owner_ins = " << std::dec << entry.owner.inst 2043 << " / count = " << entry.count 2044 << " / is_cnt = " << entry.is_cnt << std::endl; 2045 #endif 2046 2121 if(m_debug) 2122 std::cout << " <MEMC " << name() << " READ_DIR_HIT> Update directory entry:" 2123 << " addr = " << std::hex << m_cmd_read_addr_fifo.read() 2124 << " / set = " << std::dec << set 2125 << " / way = " << way 2126 << " / owner_id = " << std::hex << entry.owner.srcid 2127 << " / owner_ins = " << std::dec << entry.owner.inst 2128 << " / count = " << entry.count 2129 << " / is_cnt = " << entry.is_cnt << std::endl; 2130 #endif 2047 2131 m_cache_directory.write(set, way, entry); 2048 2132 r_read_fsm = READ_RSP; 2049 } 2050 break; 2133 break; 2051 2134 } 2052 2135 /////////////////// … … 2080 2163 2081 2164 m_cache_data.read_line(way, set, r_read_data); 2082 2083 if(m_monitor_ok) check_monitor( m_cmd_read_addr_fifo.read(), r_read_data[0], true);2084 2165 2085 2166 // update the cache directory … … 2095 2176 { 2096 2177 entry.owner.srcid = r_read_copy.read(); 2097 #if L1_MULTI_CACHE2098 entry.owner.cache_id = 
r_read_copy_cache.read();2099 #endif2100 2178 entry.owner.inst = r_read_copy_inst.read(); 2101 2179 entry.ptr = m_heap.next_free_ptr(); // set pointer on the heap … … 2104 2182 { 2105 2183 entry.owner.srcid = 0; 2106 #if L1_MULTI_CACHE2107 entry.owner.cache_id = 0;2108 #endif2109 2184 entry.owner.inst = false; 2110 2185 entry.ptr = 0; … … 2172 2247 HeapEntry heap_entry; 2173 2248 heap_entry.owner.srcid = m_cmd_read_srcid_fifo.read(); 2174 #if L1_MULTI_CACHE2175 heap_entry.owner.cache_id = m_cmd_read_pktid_fifo.read();2176 #endif2177 2249 heap_entry.owner.inst = ((m_cmd_read_pktid_fifo.read() & 0x2) != 0); 2178 2250 … … 2238 2310 HeapEntry last_entry; 2239 2311 last_entry.owner.srcid = 0; 2240 #if L1_MULTI_CACHE2241 last_entry.owner.cache_id = 0;2242 #endif2243 2312 last_entry.owner.inst = false; 2244 2313 … … 2266 2335 case READ_RSP: // request the TGT_RSP FSM to return data 2267 2336 { 2268 if(!r_read_to_tgt_rsp_req)2269 {2270 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i];2271 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()];2272 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read();2273 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read();2274 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read();2275 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read();2276 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read();2277 cmd_read_fifo_get = true;2278 r_read_to_tgt_rsp_req = true;2279 r_read_fsm = READ_IDLE;2337 if(!r_read_to_tgt_rsp_req) 2338 { 2339 for(size_t i=0 ; i<m_words ; i++) r_read_to_tgt_rsp_data[i] = r_read_data[i]; 2340 r_read_to_tgt_rsp_word = m_x[(addr_t) m_cmd_read_addr_fifo.read()]; 2341 r_read_to_tgt_rsp_length = m_cmd_read_length_fifo.read(); 2342 r_read_to_tgt_rsp_srcid = m_cmd_read_srcid_fifo.read(); 2343 r_read_to_tgt_rsp_trdid = m_cmd_read_trdid_fifo.read(); 2344 r_read_to_tgt_rsp_pktid = m_cmd_read_pktid_fifo.read(); 2345 r_read_to_tgt_rsp_ll_key = r_read_ll_key.read(); 2346 cmd_read_fifo_get = true; 2347 r_read_to_tgt_rsp_req = true; 2348 r_read_fsm = READ_IDLE; 2280 2349 2281 2350 #if DEBUG_MEMC_READ … … 2286 2355 << " / nwords = " << std::dec << m_cmd_read_length_fifo.read() << std::endl; 2287 2356 #endif 2288 }2289 break;2357 } 2358 break; 2290 2359 } 2291 2360 /////////////////// 2292 2361 case READ_TRT_LOCK: // read miss : check the Transaction Table 2293 2362 { 2294 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ)2295 {2296 size_t index = 0;2297 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read();2298 bool hit_read = m_trt.hit_read(m_nline[addr], index);2299 bool hit_write = m_trt.hit_write(m_nline[addr]);2300 bool wok = !m_trt.full(index);2301 2302 if(hit_read or !wok or hit_write) // missingline already requested or no space2303 {2304 if(!wok)m_cpt_trt_full++;2305 if(hit_read or hit_write) m_cpt_trt_rb++;2306 r_read_fsm = READ_IDLE;2307 }2308 else // missing line is requested to the XRAM2309 {2310 m_cpt_read_miss++;2311 r_read_trt_index = index;2312 r_read_fsm = READ_TRT_SET;2313 }2363 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2364 { 2365 size_t index = 0; 2366 addr_t addr = (addr_t) m_cmd_read_addr_fifo.read(); 2367 bool hit_read = m_trt.hit_read(m_nline[addr], index); 2368 bool hit_write = m_trt.hit_write(m_nline[addr]); 2369 bool wok = not m_trt.full(index); 2370 2371 if(hit_read or !wok or hit_write) // line already requested or no space 2372 { 2373 if(!wok) m_cpt_trt_full++; 2374 if(hit_read or hit_write) m_cpt_trt_rb++; 2375 r_read_fsm = READ_IDLE; 2376 } 2377 else // missing line is requested to the XRAM 
2378 { 2379 m_cpt_read_miss++; 2380 r_read_trt_index = index; 2381 r_read_fsm = READ_TRT_SET; 2382 } 2314 2383 2315 2384 #if DEBUG_MEMC_READ … … 2320 2389 << " / full = " << !wok << std::endl; 2321 2390 #endif 2322 } 2323 break; 2324 } 2325 2391 } 2392 break; 2393 } 2326 2394 ////////////////// 2327 2395 case READ_TRT_SET: // register get transaction in TRT 2328 2396 { 2329 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ)2330 {2331 m_trt.set(r_read_trt_index.read(),2332 true,2333 2334 2335 2336 2337 true,2338 2339 2340 2341 2342 r_read_ll_key.read());2397 if(r_alloc_trt_fsm.read() == ALLOC_TRT_READ) 2398 { 2399 m_trt.set( r_read_trt_index.read(), 2400 true, // GET 2401 m_nline[(addr_t)(m_cmd_read_addr_fifo.read())], 2402 m_cmd_read_srcid_fifo.read(), 2403 m_cmd_read_trdid_fifo.read(), 2404 m_cmd_read_pktid_fifo.read(), 2405 true, // proc read 2406 m_cmd_read_length_fifo.read(), 2407 m_x[(addr_t)(m_cmd_read_addr_fifo.read())], 2408 std::vector<be_t> (m_words,0), 2409 std::vector<data_t> (m_words,0), 2410 r_read_ll_key.read() ); 2343 2411 #if DEBUG_MEMC_READ 2344 2412 if(m_debug) 2345 std::cout << " <MEMC " << name() << " READ_TRT_SET> Write in Transaction Table:"2413 std::cout << " <MEMC " << name() << " READ_TRT_SET> Set a GET in TRT:" 2346 2414 << " address = " << std::hex << m_cmd_read_addr_fifo.read() 2347 2415 << " / srcid = " << std::hex << m_cmd_read_srcid_fifo.read() << std::endl; 2348 2416 #endif 2349 r_read_fsm = READ_TRT_REQ;2350 }2351 break;2417 r_read_fsm = READ_TRT_REQ; 2418 } 2419 break; 2352 2420 } 2353 2421 … … 2355 2423 case READ_TRT_REQ: // consume the read request in FIFO and send it to IXR_CMD_FSM 2356 2424 { 2357 if(not r_read_to_ixr_cmd_req) 2358 { 2359 cmd_read_fifo_get = true; 2360 r_read_to_ixr_cmd_req = true; 2361 r_read_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_read_addr_fifo.read())]; 2362 r_read_to_ixr_cmd_trdid = r_read_trt_index.read(); 2363 r_read_fsm = READ_IDLE; 2425 if(not r_read_to_ixr_cmd_req) 2426 { 2427 cmd_read_fifo_get = true; 2428 r_read_to_ixr_cmd_req = true; 2429 r_read_to_ixr_cmd_index = r_read_trt_index.read(); 2430 r_read_fsm = READ_IDLE; 2364 2431 2365 2432 #if DEBUG_MEMC_READ … … 2368 2435 << std::hex << m_cmd_read_addr_fifo.read() << std::endl; 2369 2436 #endif 2370 }2371 break;2437 } 2438 break; 2372 2439 } 2373 2440 } // end switch read_fsm … … 2387 2454 // If the data is cached by other processors, a coherence transaction must 2388 2455 // be launched (sc requests always require a coherence transaction): 2389 // It is a multicast update if the line is not in counter mode , andthe processor2456 // It is a multicast update if the line is not in counter mode: the processor 2390 2457 // takes the lock protecting the Update Table (UPT) to register this transaction. 2391 // It is a broadcast invalidate if the line is in counter mode.2392 2458 // If the UPT is full, it releases the lock(s) and retry. Then, it sends 2393 2459 // a multi-update request to all owners of the line (but the writer), … … 2395 2461 // does not respond to the writing processor, as this response will be sent by 2396 2462 // the MULTI_ACK FSM when all update responses have been received. 2463 // It is a broadcast invalidate if the line is in counter mode: The line 2464 // should be erased in memory cache, and written in XRAM with a PUT transaction, 2465 // after registration in TRT. 2397 2466 // 2398 2467 // - In case of MISS, the WRITE FSM takes the lock protecting the transaction 2399 2468 // table (TRT). 
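The write policy described in this header can be summarised by the following restatement (illustrative only; the corresponding states WRITE_DIR_HIT, WRITE_UPT_LOCK, WRITE_BC_* and WRITE_MISS_* appear further below):

#include <cstdio>

enum Action { MISS_GET_LINE, BC_INVAL_AND_PUT, MULTICAST_UPDATE, WRITE_AND_RSP };

static Action write_policy(bool hit, bool is_cnt, unsigned count,
                           bool owner_is_writer, bool is_sc)
{
    if (!hit)                 return MISS_GET_LINE;      // register a GET in TRT
    if (is_cnt && count)      return BC_INVAL_AND_PUT;   // counter mode: erase line, PUT to XRAM
    if (count == 0 ||
       (owner_is_writer && count == 1 && !is_sc))
                              return WRITE_AND_RSP;      // no coherence transaction needed
    return MULTICAST_UPDATE;                             // register in UPT, update the copies
}

int main()
{
    printf("%d\n", write_policy(false, false, 0, false, false)); // miss
    printf("%d\n", write_policy(true,  true,  3, false, false)); // counter mode with copies
    printf("%d\n", write_policy(true,  false, 1, true,  false)); // single copy owned by the writer
    printf("%d\n", write_policy(true,  false, 2, false, false)); // several copies: multicast update
    return 0;
}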
If a read transaction to the XRAM for this line already exists, 2400 2469 // it writes in the TRT (write buffer). Otherwise, if a TRT entry is free, 2401 // the WRITE FSM register a new transaction in TRT, and sends a read linerequest2470 // the WRITE FSM register a new transaction in TRT, and sends a GET request 2402 2471 // to the XRAM. If the TRT is full, it releases the lock, and waits. 2403 2472 // Finally, the WRITE FSM returns an aknowledge response to the writing processor. 2404 2473 ///////////////////////////////////////////////////////////////////////////////////// 2474 2475 //std::cout << std::endl << "write_fsm" << std::endl; 2405 2476 2406 2477 switch(r_write_fsm.read()) … … 2409 2480 case WRITE_IDLE: // copy first word of a write burst in local buffer 2410 2481 { 2411 if(m_cmd_write_addr_fifo.rok()) 2412 { 2413 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2414 m_cpt_sc++; 2415 else 2416 { 2417 m_cpt_write++; 2418 m_cpt_write_cells++; 2419 } 2420 2421 // consume a word in the FIFO & write it in the local buffer 2422 cmd_write_fifo_get = true; 2423 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2424 2425 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2426 r_write_word_index = index; 2427 r_write_word_count = 1; 2428 r_write_data[index] = m_cmd_write_data_fifo.read(); 2429 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2430 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2431 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2432 r_write_pending_sc = false; 2433 2434 // initialize the be field for all words 2435 for(size_t word=0 ; word<m_words ; word++) 2436 { 2437 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2438 else r_write_be[word] = 0x0; 2439 } 2440 2441 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2442 { 2443 r_write_fsm = WRITE_DIR_REQ; 2444 } 2445 else 2446 { 2447 r_write_fsm = WRITE_NEXT; 2448 } 2482 if(m_cmd_write_addr_fifo.rok()) 2483 { 2484 if((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC) 2485 { 2486 m_cpt_sc++; 2487 } 2488 else 2489 { 2490 m_cpt_write++; 2491 m_cpt_write_cells++; 2492 } 2493 2494 // consume a word in the FIFO & write it in the local buffer 2495 cmd_write_fifo_get = true; 2496 size_t index = m_x[(addr_t)(m_cmd_write_addr_fifo.read())]; 2497 2498 r_write_address = (addr_t)(m_cmd_write_addr_fifo.read()); 2499 r_write_word_index = index; 2500 r_write_word_count = 1; 2501 r_write_data[index] = m_cmd_write_data_fifo.read(); 2502 r_write_srcid = m_cmd_write_srcid_fifo.read(); 2503 r_write_trdid = m_cmd_write_trdid_fifo.read(); 2504 r_write_pktid = m_cmd_write_pktid_fifo.read(); 2505 r_write_pending_sc = false; 2506 2507 // initialize the be field for all words 2508 for(size_t word=0 ; word<m_words ; word++) 2509 { 2510 if(word == index) r_write_be[word] = m_cmd_write_be_fifo.read(); 2511 else r_write_be[word] = 0x0; 2512 } 2513 2514 if (m_cmd_write_eop_fifo.read() or ((m_cmd_write_pktid_fifo.read() & 0x7) == TYPE_SC)) 2515 { 2516 r_write_fsm = WRITE_DIR_REQ; 2517 } 2518 else 2519 { 2520 r_write_fsm = WRITE_NEXT; 2521 } 2449 2522 2450 2523 #if DEBUG_MEMC_WRITE … … 2455 2528 << " / data = " << m_cmd_write_data_fifo.read() << std::endl; 2456 2529 #endif 2457 } 2458 break; 2459 } 2460 2530 } 2531 break; 2532 } 2461 2533 //////////////// 2462 2534 case WRITE_NEXT: // copy next word of a write burst in local buffer 2463 2535 { 2464 if(m_cmd_write_addr_fifo.rok())2465 {2536 if(m_cmd_write_addr_fifo.rok()) 2537 { 2466 2538 2467 2539 #if DEBUG_MEMC_WRITE … … 2471 2543 << 
std::endl; 2472 2544 #endif 2473 m_cpt_write_cells++; 2474 2475 // check that the next word is in the same cache line 2476 if((m_nline[(addr_t)(r_write_address.read())] != 2477 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())])) 2478 { 2479 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_NEXT state" << std::endl 2480 << "all words in a write burst must be in same cache line" << std::endl; 2481 2482 exit(0); 2483 } 2484 2485 // consume a word in the FIFO & write it in the local buffer 2486 cmd_write_fifo_get = true; 2487 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2488 2489 r_write_be[index] = m_cmd_write_be_fifo.read(); 2490 r_write_data[index] = m_cmd_write_data_fifo.read(); 2491 r_write_word_count = r_write_word_count.read() + 1; 2492 2493 if(m_cmd_write_eop_fifo.read()) 2494 { 2495 r_write_fsm = WRITE_DIR_REQ; 2496 } 2497 } 2498 break; 2499 } 2500 2501 //////////////////// 2502 case WRITE_DIR_REQ: 2503 { 2504 // Get the lock to the directory 2505 // and access the llsc_global_table 2506 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2507 { 2508 /////////////////////////////////////////////////////////////////////// 2509 // SC command treatment 2510 // We test the r_write_pending_sc register to know if we are returning 2511 // from the WAIT state. 2512 // In this case, the SC has already succeed and we cannot consume 2513 // another time from the FIFO. Also, we don't have to test another 2514 // time if the SC has succeed 2515 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2516 { 2517 if(not m_cmd_write_addr_fifo.rok()) break; 2518 2519 assert(m_cmd_write_eop_fifo.read() and 2520 "Error in VCI_MEM_CACHE : " 2521 "invalid packet format for SC command"); 2522 2523 size_t index = r_write_word_index.read(); 2524 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2545 m_cpt_write_cells++; 2546 2547 // check that the next word is in the same cache line 2548 assert( (m_nline[(addr_t)(r_write_address.read())] == 2549 m_nline[(addr_t)(m_cmd_write_addr_fifo.read())]) and 2550 "MEMC ERROR in WRITE_NEXT state: Illegal write burst"); 2551 2552 // consume a word in the FIFO & write it in the local buffer 2553 cmd_write_fifo_get = true; 2554 size_t index = r_write_word_index.read() + r_write_word_count.read(); 2555 2556 r_write_be[index] = m_cmd_write_be_fifo.read(); 2557 r_write_data[index] = m_cmd_write_data_fifo.read(); 2558 r_write_word_count = r_write_word_count.read() + 1; 2559 2560 if(m_cmd_write_eop_fifo.read()) r_write_fsm = WRITE_DIR_REQ; 2561 } 2562 break; 2563 } 2564 /////////////////// 2565 case WRITE_DIR_REQ: // Get the lock to the directory 2566 // and access the llsc_global_table 2567 { 2568 if( r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE ) 2569 { 2570 if(((r_write_pktid.read() & 0x7) == TYPE_SC) and not r_write_pending_sc.read()) 2571 { 2572 // We enter here if it is a new SC command 2573 // If r_write_pending_sc is set the SC is not new and has already been tested 2574 2575 if(not m_cmd_write_addr_fifo.rok()) break; 2576 2577 assert( m_cmd_write_eop_fifo.read() and 2578 "MEMC ERROR in WRITE_DIR_REQ state: invalid packet format for SC command"); 2579 2580 size_t index = r_write_word_index.read(); 2581 bool sc_success = m_llsc_table.sc(r_write_address.read() , 2525 2582 r_write_data[index].read()); 2526 2583 2527 // consume a word in the FIFO & write it in the local buffer 2528 cmd_write_fifo_get = true; 2529 r_write_data[index] = m_cmd_write_data_fifo.read(); 2530 r_write_sc_fail = not sc_success; 2531 
r_write_pending_sc = true; 2532 2533 if(not sc_success) r_write_fsm = WRITE_RSP; 2534 else r_write_fsm = WRITE_DIR_LOCK; 2535 2536 break; 2537 } 2538 2539 /////////////////////////////////////////////////////////////////////// 2540 // WRITE command treatment or SC command returning from the WAIT state 2541 // In the second case, we must access the LL/SC global table to 2542 // erase any possible new reservation when we release the lock on the 2543 // directory 2544 m_llsc_table.sw(m_nline[(addr_t)r_write_address.read()],r_write_word_index.read(),r_write_word_index.read()+r_write_word_count.read()); 2545 2546 r_write_fsm = WRITE_DIR_LOCK; 2547 } 2584 // consume a word in the FIFO & write it in the local buffer 2585 cmd_write_fifo_get = true; 2586 r_write_data[index] = m_cmd_write_data_fifo.read(); 2587 r_write_sc_fail = not sc_success; 2588 r_write_pending_sc = true; 2589 2590 if(not sc_success) r_write_fsm = WRITE_RSP; 2591 else r_write_fsm = WRITE_DIR_LOCK; 2592 } 2593 else 2594 { 2595 // We enter here if it is a SW command or an already tested SC command 2596 2597 m_llsc_table.sw( m_nline[(addr_t)r_write_address.read()], 2598 r_write_word_index.read(), 2599 r_write_word_index.read() + r_write_word_count.read() ); 2600 2601 r_write_fsm = WRITE_DIR_LOCK; 2602 } 2548 2603 2549 2604 #if DEBUG_MEMC_WRITE … … 2552 2607 << std::endl; 2553 2608 #endif 2554 2555 break; 2556 } 2557 2609 } 2610 break; 2611 } 2558 2612 //////////////////// 2559 2613 case WRITE_DIR_LOCK: // access directory to check hit/miss 2560 2614 { 2561 if(r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) 2562 { 2615 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2616 "MEMC ERROR in ALLOC_DIR_LOCK state: Bad DIR allocation"); 2617 2563 2618 size_t way = 0; 2564 2619 DirectoryEntry entry(m_cache_directory.read(r_write_address.read(), way)); … … 2566 2621 if(entry.valid) // hit 2567 2622 { 2568 // copy directory entry in local buffer in case of hit 2569 r_write_is_cnt = entry.is_cnt; 2570 r_write_lock = entry.lock; 2571 r_write_tag = entry.tag; 2572 r_write_copy = entry.owner.srcid; 2573 #if L1_MULTI_CACHE 2574 r_write_copy_cache = entry.owner.cache_id; 2575 #endif 2576 r_write_copy_inst = entry.owner.inst; 2577 r_write_count = entry.count; 2578 r_write_ptr = entry.ptr; 2579 r_write_way = way; 2580 2581 if(entry.is_cnt and entry.count) 2582 { 2583 r_write_fsm = WRITE_DIR_READ; 2584 } 2585 else 2586 { 2587 r_write_fsm = WRITE_DIR_HIT; 2588 } 2623 // copy directory entry in local buffer in case of hit 2624 r_write_is_cnt = entry.is_cnt; 2625 r_write_lock = entry.lock; 2626 r_write_tag = entry.tag; 2627 r_write_copy = entry.owner.srcid; 2628 r_write_copy_inst = entry.owner.inst; 2629 r_write_count = entry.count; 2630 r_write_ptr = entry.ptr; 2631 r_write_way = way; 2632 2633 if(entry.is_cnt and entry.count) r_write_fsm = WRITE_BC_DIR_READ; 2634 else r_write_fsm = WRITE_DIR_HIT; 2589 2635 } 2590 2636 else // miss 2591 2637 { 2592 r_write_fsm = WRITE_MISS_TRT_LOCK;2638 r_write_fsm = WRITE_MISS_TRT_LOCK; 2593 2639 } 2594 2640 … … 2607 2653 } 2608 2654 #endif 2609 } 2610 else 2611 { 2612 std::cout << "VCI_MEM_CACHE ERROR " << name() 2613 << " WRITE_DIR_LOCK state" << std::endl 2614 << "bad DIR allocation" << std::endl; 2615 2616 exit(0); 2617 } 2618 break; 2619 } 2620 //////////////////// 2621 case WRITE_DIR_READ: // read the cache and complete the buffer when be!=0xF 2622 { 2623 // update local buffer 2624 size_t set = m_y[(addr_t)(r_write_address.read())]; 2625 size_t way = r_write_way.read(); 2626 for(size_t word=0 ; word<m_words ; 
word++) 2627 { 2628 data_t mask = 0; 2629 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 2630 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 2631 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 2632 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 2633 2634 // complete only if mask is not null (for energy consumption) 2635 r_write_data[word] = (r_write_data[word].read() & mask) | 2636 (m_cache_data.read(way, set, word) & ~mask); 2637 2638 } // end for 2639 2640 // test if a coherence broadcast is required 2641 r_write_fsm = WRITE_BC_TRT_LOCK; 2642 2643 #if DEBUG_MEMC_WRITE 2644 if(m_debug) 2645 std::cout << " <MEMC " << name() << " WRITE_DIR_READ>" 2646 << " Read the cache to complete local buffer" << std::endl; 2647 #endif 2648 break; 2649 } 2650 2655 break; 2656 } 2651 2657 /////////////////// 2652 case WRITE_DIR_HIT: 2653 { 2654 // update the cache directory 2655 // update directory with Dirty bit 2656 DirectoryEntry entry; 2657 entry.valid = true; 2658 entry.dirty = true; 2659 entry.tag = r_write_tag.read(); 2660 entry.is_cnt = r_write_is_cnt.read(); 2661 entry.lock = r_write_lock.read(); 2662 entry.owner.srcid = r_write_copy.read(); 2663 #if L1_MULTI_CACHE 2664 entry.owner.cache_id = r_write_copy_cache.read(); 2665 #endif 2666 entry.owner.inst = r_write_copy_inst.read(); 2667 entry.count = r_write_count.read(); 2668 entry.ptr = r_write_ptr.read(); 2669 2670 size_t set = m_y[(addr_t)(r_write_address.read())]; 2671 size_t way = r_write_way.read(); 2672 2673 // update directory 2674 m_cache_directory.write(set, way, entry); 2675 2676 // owner is true when the the first registered copy is the writer itself 2677 bool owner = (((r_write_copy.read() == r_write_srcid.read()) 2678 #if L1_MULTI_CACHE 2679 and(r_write_copy_cache.read() ==r_write_pktid.read()) 2680 #endif 2681 ) and not r_write_copy_inst.read()); 2682 2683 // no_update is true when there is no need for coherence transaction 2684 // (tests for sc requests) 2685 bool no_update = ( (r_write_count.read() == 0) or 2658 case WRITE_DIR_HIT: // update the cache directory with Dirty bit 2659 // and update data cache 2660 { 2661 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 2662 "MEMC ERROR in ALLOC_DIR_HIT state: Bad DIR allocation"); 2663 2664 DirectoryEntry entry; 2665 entry.valid = true; 2666 entry.dirty = true; 2667 entry.tag = r_write_tag.read(); 2668 entry.is_cnt = r_write_is_cnt.read(); 2669 entry.lock = r_write_lock.read(); 2670 entry.owner.srcid = r_write_copy.read(); 2671 entry.owner.inst = r_write_copy_inst.read(); 2672 entry.count = r_write_count.read(); 2673 entry.ptr = r_write_ptr.read(); 2674 2675 size_t set = m_y[(addr_t)(r_write_address.read())]; 2676 size_t way = r_write_way.read(); 2677 2678 // update directory 2679 m_cache_directory.write(set, way, entry); 2680 2681 // owner is true when the the first registered copy is the writer itself 2682 bool owner = ( (r_write_copy.read() == r_write_srcid.read()) 2683 and not r_write_copy_inst.read() ); 2684 2685 // no_update is true when there is no need for coherence transaction 2686 bool no_update = ( (r_write_count.read() == 0) or 2686 2687 (owner and (r_write_count.read() ==1) and 2687 2688 (r_write_pktid.read() != TYPE_SC))); 2688 2689 2689 // write data in the cache if no coherence transaction 2690 if(no_update) 2691 { 2692 for(size_t word=0 ; word<m_words ; word++) 2693 { 2694 m_cache_data.write(way, set, word, r_write_data[word].read(), r_write_be[word].read()); 2695 2696 if(m_monitor_ok) 2697 { 2698 addr_t 
address = (r_write_address.read() & ~(addr_t) 0x3F) | word<<2; 2699 check_monitor( address, r_write_data[word].read(), false); 2700 } 2701 } 2702 } 2703 2704 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2705 { 2706 r_write_count = r_write_count.read() - 1; 2707 } 2708 2709 if(no_update) 2710 // Write transaction completed 2711 { 2712 r_write_fsm = WRITE_RSP; 2713 } 2714 else 2715 // coherence update required 2716 { 2717 if(!r_write_to_cc_send_multi_req.read() and 2718 !r_write_to_cc_send_brdcast_req.read()) 2719 { 2720 r_write_fsm = WRITE_UPT_LOCK; 2721 } 2722 else 2723 { 2724 r_write_fsm = WRITE_WAIT; 2725 } 2726 } 2690 // write data in the cache if no coherence transaction 2691 if(no_update) 2692 { 2693 for(size_t word=0 ; word<m_words ; word++) 2694 { 2695 m_cache_data.write( way, 2696 set, 2697 word, 2698 r_write_data[word].read(), 2699 r_write_be[word].read()); 2700 } 2701 } 2702 2703 if(owner and not no_update and(r_write_pktid.read() != TYPE_SC)) 2704 { 2705 r_write_count = r_write_count.read() - 1; 2706 } 2707 2708 if(no_update) // Write transaction completed 2709 { 2710 r_write_fsm = WRITE_RSP; 2711 } 2712 else // coherence update required 2713 { 2714 if(!r_write_to_cc_send_multi_req.read() and 2715 !r_write_to_cc_send_brdcast_req.read()) 2716 { 2717 r_write_fsm = WRITE_UPT_LOCK; 2718 } 2719 else 2720 { 2721 r_write_fsm = WRITE_WAIT; 2722 } 2723 } 2727 2724 2728 2725 #if DEBUG_MEMC_WRITE 2729 2726 if(m_debug) 2730 2727 { 2731 if(no_update) 2732 { 2733 std::cout << " <MEMC " << name() 2734 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" 2735 << std::endl; 2736 } 2737 else 2738 { 2739 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2740 << " is_cnt = " << r_write_is_cnt.read() 2741 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2742 if(owner) std::cout << " ... but the first copy is the writer" << std::endl; 2743 } 2728 if(no_update) 2729 { 2730 std::cout << " <MEMC " << name() 2731 << " WRITE_DIR_HIT> Write into cache / No coherence transaction" << std::endl; 2744 2732 } 2745 #endif 2746 break; 2733 else 2734 { 2735 std::cout << " <MEMC " << name() << " WRITE_DIR_HIT> Coherence update required:" 2736 << " is_cnt = " << r_write_is_cnt.read() 2737 << " nb_copies = " << std::dec << r_write_count.read() << std::endl; 2738 if(owner) std::cout << " ... 
but the first copy is the writer" << std::endl; 2739 } 2740 } 2741 #endif 2742 break; 2747 2743 } 2748 2744 //////////////////// 2749 2745 case WRITE_UPT_LOCK: // Try to register the update request in UPT 2750 2746 { 2751 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2752 { 2753 bool wok = false; 2754 size_t index = 0; 2755 size_t srcid = r_write_srcid.read(); 2756 size_t trdid = r_write_trdid.read(); 2757 size_t pktid = r_write_pktid.read(); 2758 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2759 size_t nb_copies = r_write_count.read(); 2760 size_t set = m_y[(addr_t)(r_write_address.read())]; 2761 size_t way = r_write_way.read(); 2762 2763 wok = m_upt.set(true, // it's an update transaction 2764 false, // it's not a broadcast 2765 true, // response required 2766 false, // no acknowledge required 2767 srcid, 2768 trdid, 2769 pktid, 2770 nline, 2771 nb_copies, 2772 index); 2773 if(wok) // write data in cache 2774 { 2775 for(size_t word=0 ; word<m_words ; word++) 2776 { 2777 m_cache_data.write(way, 2778 set, 2779 word, 2780 r_write_data[word].read(), 2781 r_write_be[word].read()); 2782 2783 if(m_monitor_ok) 2747 if(r_alloc_upt_fsm.read() == ALLOC_UPT_WRITE) 2748 { 2749 bool wok = false; 2750 size_t index = 0; 2751 size_t srcid = r_write_srcid.read(); 2752 size_t trdid = r_write_trdid.read(); 2753 size_t pktid = r_write_pktid.read(); 2754 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 2755 size_t nb_copies = r_write_count.read(); 2756 size_t set = m_y[(addr_t)(r_write_address.read())]; 2757 size_t way = r_write_way.read(); 2758 2759 wok = m_upt.set( true, // it's an update transaction 2760 false, // it's not a broadcast 2761 true, // response required 2762 false, // no acknowledge required 2763 srcid, 2764 trdid, 2765 pktid, 2766 nline, 2767 nb_copies, 2768 index); 2769 2770 if( wok ) // write data in cache 2784 2771 { 2785 addr_t address = (r_write_address.read() & ~(addr_t) 0x3F) | word<<2; 2786 check_monitor( address, r_write_data[word].read(), false); 2772 for(size_t word=0 ; word<m_words ; word++) 2773 { 2774 m_cache_data.write( way, 2775 set, 2776 word, 2777 r_write_data[word].read(), 2778 r_write_be[word].read()); 2779 } 2787 2780 } 2788 }2789 }2790 2781 2791 2782 #if DEBUG_MEMC_WRITE 2792 if(m_debug )2783 if(m_debug and wok) 2793 2784 { 2794 if(wok) 2795 { 2796 std::cout << " <MEMC " << name() 2797 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 2798 << " nb_copies = " << r_write_count.read() << std::endl; 2799 } 2785 std::cout << " <MEMC " << name() 2786 << " WRITE_UPT_LOCK> Register the multicast update in UPT / " 2787 << " nb_copies = " << r_write_count.read() << std::endl; 2800 2788 } 2801 2789 #endif 2802 r_write_upt_index = index;2803 //releases the lock protecting UPT and the DIR if no entry...2804 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK;2805 else r_write_fsm = WRITE_WAIT;2806 }2807 break;2790 r_write_upt_index = index; 2791 // releases the lock protecting UPT and the DIR if no entry... 
2792 if(wok) r_write_fsm = WRITE_UPT_HEAP_LOCK; 2793 else r_write_fsm = WRITE_WAIT; 2794 } 2795 break; 2808 2796 } 2809 2797 … … 2847 2835 for(size_t i=min ; i<max ; i++) r_write_to_cc_send_data[i] = r_write_data[i]; 2848 2836 2849 if((r_write_copy.read() != r_write_srcid.read()) or(r_write_pktid.read() == TYPE_SC) or 2850 #if L1_MULTI_CACHE 2851 (r_write_copy_cache.read() != r_write_pktid.read()) or 2852 #endif 2853 r_write_copy_inst.read()) 2837 if( (r_write_copy.read() != r_write_srcid.read()) or 2838 (r_write_pktid.read() == TYPE_SC) or r_write_copy_inst.read()) 2854 2839 { 2855 2840 // put the first srcid in the fifo … … 2857 2842 write_to_cc_send_fifo_inst = r_write_copy_inst.read(); 2858 2843 write_to_cc_send_fifo_srcid = r_write_copy.read(); 2859 #if L1_MULTI_CACHE2860 write_to_cc_send_fifo_cache_id= r_write_copy_cache.read();2861 #endif2862 2844 if(r_write_count.read() == 1) 2863 2845 { … … 2910 2892 bool dec_upt_counter; 2911 2893 2912 if(((entry.owner.srcid != r_write_srcid.read()) or (r_write_pktid.read() == TYPE_SC)) or 2913 #if L1_MULTI_CACHE 2914 (entry.owner.cache_id != r_write_pktid.read()) or 2915 #endif 2916 entry.owner.inst) // put the next srcid in the fifo 2894 // put the next srcid in the fifo 2895 if( (entry.owner.srcid != r_write_srcid.read()) or 2896 (r_write_pktid.read() == TYPE_SC) or entry.owner.inst) 2917 2897 { 2918 2898 dec_upt_counter = false; … … 2920 2900 write_to_cc_send_fifo_inst = entry.owner.inst; 2921 2901 write_to_cc_send_fifo_srcid = entry.owner.srcid; 2922 #if L1_MULTI_CACHE2923 write_to_cc_send_fifo_cache_id = entry.owner.cache_id;2924 #endif2925 2902 2926 2903 #if DEBUG_MEMC_WRITE … … 2992 2969 2993 2970 /////////////// 2994 case WRITE_RSP: 2995 { 2996 // Post a request to TGT_RSP FSM to acknowledge the write 2997 // In order to increase the Write requests throughput, 2998 // we don't wait to return in the IDLE state to consume 2999 // a new request in the write FIFO 3000 2971 case WRITE_RSP: // Post a request to TGT_RSP FSM to acknowledge the write 2972 // In order to increase the Write requests throughput, 2973 // we don't wait to return in the IDLE state to consume 2974 // a new request in the write FIFO 2975 { 3001 2976 if(!r_write_to_tgt_rsp_req.read()) 3002 2977 { … … 3086 3061 bool hit_read = m_trt.hit_read(m_nline[addr], hit_index); 3087 3062 bool hit_write = m_trt.hit_write(m_nline[addr]); 3088 bool wok = !m_trt.full(wok_index);3063 bool wok = not m_trt.full(wok_index); 3089 3064 3090 3065 if(hit_read) // register the modified data in TRT … … 3170 3145 data_vector.push_back(r_write_data[i]); 3171 3146 } 3172 m_trt.write_data_mask( r_write_trt_index.read(),3173 3174 data_vector);3147 m_trt.write_data_mask( r_write_trt_index.read(), 3148 be_vector, 3149 data_vector ); 3175 3150 r_write_fsm = WRITE_RSP; 3176 3151 … … 3182 3157 break; 3183 3158 } 3184 3185 3159 ///////////////////////// 3186 3160 case WRITE_MISS_XRAM_REQ: // send a GET request to IXR_CMD FSM 3187 3161 { 3188 if( !r_write_to_ixr_cmd_req)3162 if( not r_write_to_ixr_cmd_req.read() ) 3189 3163 { 3190 3164 r_write_to_ixr_cmd_req = true; 3191 r_write_to_ixr_cmd_write = false; 3192 r_write_to_ixr_cmd_nline = m_nline[(addr_t)(r_write_address.read())]; 3193 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 3165 r_write_to_ixr_cmd_put = false; 3166 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3194 3167 r_write_fsm = WRITE_RSP; 3195 3168 … … 3201 3174 break; 3202 3175 } 3203 3204 3176 /////////////////////// 3205 case WRITE_BC_TRT_LOCK: // Check TRT not full 3206 { 3207 
if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3208 { 3209 size_t wok_index = 0; 3210 bool wok = !m_trt.full(wok_index); 3211 if(wok) // set a new entry in TRT 3212 { 3213 r_write_trt_index = wok_index; 3214 r_write_fsm = WRITE_BC_IVT_LOCK; 3215 } 3216 else // wait an empty entry in TRT 3217 { 3218 r_write_fsm = WRITE_WAIT; 3219 } 3177 case WRITE_BC_DIR_READ: // enter this state if a broadcast-inval is required 3178 // the cache line must be erased in mem-cache, and written 3179 // into XRAM. we read the cache and complete the buffer 3180 { 3181 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3182 "MEMC ERROR in WRITE_BC_DIR_READ state: Bad DIR allocation"); 3183 3184 // update local buffer 3185 size_t set = m_y[(addr_t)(r_write_address.read())]; 3186 size_t way = r_write_way.read(); 3187 for(size_t word=0 ; word<m_words ; word++) 3188 { 3189 data_t mask = 0; 3190 if(r_write_be[word].read() & 0x1) mask = mask | 0x000000FF; 3191 if(r_write_be[word].read() & 0x2) mask = mask | 0x0000FF00; 3192 if(r_write_be[word].read() & 0x4) mask = mask | 0x00FF0000; 3193 if(r_write_be[word].read() & 0x8) mask = mask | 0xFF000000; 3194 3195 // complete only if mask is not null (for energy consumption) 3196 r_write_data[word] = (r_write_data[word].read() & mask) | 3197 (m_cache_data.read(way, set, word) & ~mask); 3198 } // end for 3199 3200 r_write_fsm = WRITE_BC_TRT_LOCK; 3201 3202 #if DEBUG_MEMC_WRITE 3203 if(m_debug) 3204 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_READ>" 3205 << " Read the cache to complete local buffer" << std::endl; 3206 #endif 3207 break; 3208 } 3209 /////////////////////// 3210 case WRITE_BC_TRT_LOCK: // get TRT lock to check TRT not full 3211 { 3212 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3213 "MEMC ERROR in WRITE_BC_TRT_LOCK state: Bad DIR allocation"); 3214 3215 if(r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) 3216 { 3217 size_t wok_index = 0; 3218 bool wok = not m_trt.full(wok_index); 3219 if( wok ) 3220 { 3221 r_write_trt_index = wok_index; 3222 r_write_fsm = WRITE_BC_IVT_LOCK; 3223 } 3224 else // wait an empty slot in TRT 3225 { 3226 r_write_fsm = WRITE_WAIT; 3227 } 3220 3228 3221 3229 #if DEBUG_MEMC_WRITE … … 3224 3232 << " : wok = " << wok << " / index = " << wok_index << std::endl; 3225 3233 #endif 3226 } 3227 break; 3228 } 3229 3234 } 3235 break; 3236 } 3230 3237 ////////////////////// 3231 case WRITE_BC_IVT_LOCK: // register BC transaction in IVT 3232 { 3233 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3234 { 3235 bool wok = false; 3236 size_t index = 0; 3237 size_t srcid = r_write_srcid.read(); 3238 size_t trdid = r_write_trdid.read(); 3239 size_t pktid = r_write_pktid.read(); 3240 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3241 size_t nb_copies = r_write_count.read(); 3242 3243 wok = m_ivt.set(false, // it's an inval transaction 3244 true, // it's a broadcast 3245 true, // response required 3246 false, // no acknowledge required 3247 srcid, 3248 trdid, 3249 pktid, 3250 nline, 3251 nb_copies, 3252 index); 3253 3238 case WRITE_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 3239 { 3240 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3241 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad DIR allocation"); 3242 3243 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3244 "MEMC ERROR in WRITE_BC_IVT_LOCK state: Bad TRT allocation"); 3245 3246 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) 3247 { 3248 bool wok = false; 3249 size_t index = 0; 3250 size_t srcid = r_write_srcid.read(); 3251 size_t trdid = 
r_write_trdid.read(); 3252 size_t pktid = r_write_pktid.read(); 3253 addr_t nline = m_nline[(addr_t)(r_write_address.read())]; 3254 size_t nb_copies = r_write_count.read(); 3255 3256 wok = m_ivt.set(false, // it's an inval transaction 3257 true, // it's a broadcast 3258 true, // response required 3259 false, // no acknowledge required 3260 srcid, 3261 trdid, 3262 pktid, 3263 nline, 3264 nb_copies, 3265 index); 3254 3266 #if DEBUG_MEMC_WRITE 3255 3267 if( m_debug and wok ) … … 3257 3269 << " / nb_copies = " << r_write_count.read() << std::endl; 3258 3270 #endif 3259 r_write_upt_index = index; 3260 3261 if(wok) r_write_fsm = WRITE_BC_DIR_INVAL; 3262 else r_write_fsm = WRITE_WAIT; 3263 } 3264 break; 3265 } 3266 3271 r_write_upt_index = index; 3272 3273 if( wok ) r_write_fsm = WRITE_BC_DIR_INVAL; 3274 else r_write_fsm = WRITE_WAIT; 3275 } 3276 break; 3277 } 3267 3278 //////////////////////// 3268 case WRITE_BC_DIR_INVAL: 3269 { 3270 // Register a put transaction to XRAM in TRT 3271 // and invalidate the line in directory 3272 if((r_alloc_trt_fsm.read() != ALLOC_TRT_WRITE) or 3273 (r_alloc_ivt_fsm.read() != ALLOC_IVT_WRITE) or 3274 (r_alloc_dir_fsm.read() != ALLOC_DIR_WRITE)) 3275 { 3276 std::cout << "VCI_MEM_CACHE ERROR " << name() << " WRITE_BC_DIR_INVAL state" << std::endl; 3277 std::cout << "bad TRT, DIR, or IVT allocation" << std::endl; 3278 exit(0); 3279 } 3280 3281 // register a write request to XRAM in TRT 3282 m_trt.set(r_write_trt_index.read(), 3283 false, // write request to XRAM 3284 m_nline[(addr_t)(r_write_address.read())], 3285 0, 3286 0, 3287 0, 3288 false, // not a processor read 3289 0, // not a single word 3290 0, // word index 3291 std::vector<be_t> (m_words,0), 3292 std::vector<data_t> (m_words,0)); 3293 3294 // invalidate directory entry 3295 DirectoryEntry entry; 3296 entry.valid = false; 3297 entry.dirty = false; 3298 entry.tag = 0; 3299 entry.is_cnt = false; 3300 entry.lock = false; 3301 entry.owner.srcid = 0; 3302 #if L1_MULTI_CACHE 3303 entry.owner.cache_id= 0; 3304 #endif 3305 entry.owner.inst = false; 3306 entry.ptr = 0; 3307 entry.count = 0; 3308 size_t set = m_y[(addr_t)(r_write_address.read())]; 3309 size_t way = r_write_way.read(); 3310 3311 m_cache_directory.write(set, way, entry); 3279 case WRITE_BC_DIR_INVAL: // Register a put transaction in TRT 3280 // and invalidate the line in directory 3281 { 3282 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_WRITE) and 3283 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad DIR allocation"); 3284 3285 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_WRITE) and 3286 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad TRT allocation"); 3287 3288 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_WRITE) and 3289 "MEMC ERROR in WRITE_BC_DIR_INVAL state: Bad IVT allocation"); 3290 3291 // register PUT request in TRT 3292 std::vector<data_t> data_vector; 3293 data_vector.clear(); 3294 for(size_t i=0; i<m_words; i++) data_vector.push_back(r_write_data[i].read()); 3295 m_trt.set( r_write_trt_index.read(), 3296 false, // PUT request 3297 m_nline[(addr_t)(r_write_address.read())], 3298 0, // unused 3299 0, // unused 3300 0, // unused 3301 false, // not a processor read 3302 0, // unused 3303 0, // unused 3304 std::vector<be_t> (m_words,0), 3305 data_vector ); 3306 3307 // invalidate directory entry 3308 DirectoryEntry entry; 3309 entry.valid = false; 3310 entry.dirty = false; 3311 entry.tag = 0; 3312 entry.is_cnt = false; 3313 entry.lock = false; 3314 entry.owner.srcid = 0; 3315 entry.owner.inst = false; 3316 entry.ptr = 0; 3317 entry.count = 0; 3318 
size_t set = m_y[(addr_t)(r_write_address.read())]; 3319 size_t way = r_write_way.read(); 3320 3321 m_cache_directory.write(set, way, entry); 3312 3322 3313 3323 #if DEBUG_MEMC_WRITE 3314 3324 if(m_debug) 3315 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Invalidate the directory entry: @ =" 3316 << r_write_address.read() << " / register the put transaction in TRT:"<< std::endl; 3317 #endif 3318 r_write_fsm = WRITE_BC_CC_SEND; 3319 break; 3325 std::cout << " <MEMC " << name() << " WRITE_BC_DIR_INVAL> Inval DIR and register in TRT:" 3326 << " address = " << r_write_address.read() << std::endl; 3327 #endif 3328 r_write_fsm = WRITE_BC_CC_SEND; 3329 break; 3320 3330 } 3321 3331 … … 3323 3333 case WRITE_BC_CC_SEND: // Post a coherence broadcast request to CC_SEND FSM 3324 3334 { 3325 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read())3326 {3327 r_write_to_cc_send_multi_req = false;3328 r_write_to_cc_send_brdcast_req = true;3329 r_write_to_cc_send_trdid = r_write_upt_index.read();3330 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())];3331 r_write_to_cc_send_index = 0;3332 r_write_to_cc_send_count = 0;3333 3334 for(size_t i=0; i<m_words ; i++)3335 {3336 r_write_to_cc_send_be[i]=0;3337 r_write_to_cc_send_data[i] = 0;3338 }3339 r_write_fsm = WRITE_BC_XRAM_REQ;3335 if(!r_write_to_cc_send_multi_req.read() and !r_write_to_cc_send_brdcast_req.read()) 3336 { 3337 r_write_to_cc_send_multi_req = false; 3338 r_write_to_cc_send_brdcast_req = true; 3339 r_write_to_cc_send_trdid = r_write_upt_index.read(); 3340 r_write_to_cc_send_nline = m_nline[(addr_t)(r_write_address.read())]; 3341 r_write_to_cc_send_index = 0; 3342 r_write_to_cc_send_count = 0; 3343 3344 for(size_t i=0; i<m_words ; i++) // what is the purpose of this for loop?
(AG) 3345 { 3346 r_write_to_cc_send_be[i]=0; 3347 r_write_to_cc_send_data[i] = 0; 3348 } 3349 r_write_fsm = WRITE_BC_XRAM_REQ; 3340 3350 3341 3351 #if DEBUG_MEMC_WRITE … … 3344 3354 << " WRITE_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 3345 3355 #endif 3346 }3347 break;3356 } 3357 break; 3348 3358 } 3349 3359 3350 3360 /////////////////////// 3351 case WRITE_BC_XRAM_REQ: // Post a put request to IXR_CMD FSM 3352 { 3353 if(!r_write_to_ixr_cmd_req) 3354 { 3355 r_write_to_ixr_cmd_req = true; 3356 r_write_to_ixr_cmd_write = true; 3357 r_write_to_ixr_cmd_nline = m_nline[(addr_t)(r_write_address.read())]; 3358 r_write_to_ixr_cmd_trdid = r_write_trt_index.read(); 3359 3360 for(size_t i=0; i<m_words; i++) r_write_to_ixr_cmd_data[i] = r_write_data[i]; 3361 3362 r_write_fsm = WRITE_IDLE; 3361 case WRITE_BC_XRAM_REQ: // Post a PUT request to IXR_CMD FSM 3362 { 3363 if( not r_write_to_ixr_cmd_req.read() ) 3364 { 3365 r_write_to_ixr_cmd_req = true; 3366 r_write_to_ixr_cmd_put = true; 3367 r_write_to_ixr_cmd_index = r_write_trt_index.read(); 3368 r_write_fsm = WRITE_IDLE; 3363 3369 3364 3370 #if DEBUG_MEMC_WRITE … … 3367 3373 << " WRITE_BC_XRAM_REQ> Post a put request to IXR_CMD FSM" << std::endl; 3368 3374 #endif 3369 }3370 break;3375 } 3376 break; 3371 3377 } 3372 3378 } // end switch r_write_fsm … … 3376 3382 /////////////////////////////////////////////////////////////////////// 3377 3383 // The IXR_CMD fsm controls the command packets to the XRAM : 3378 // It handles requests from the READ, WRITE, CAS, XRAM_RSP FSMs3379 // with a round-robin priority.3384 // It handles requests from 5 FSMs with a round-robin priority: 3385 // READ > WRITE > CAS > XRAM_RSP > CONFIG 3380 3386 // 3381 // - It sends a single flit VCI read request to the XRAM in case of MISS 3382 // posted by the READ, WRITE or CAS FSMs : the TRDID field contains 3383 // the Transaction Tab index. 3384 // The VCI response is a multi-flit packet : the N cells contain 3385 // the N data words. 3387 // - It sends a single flit VCI read to the XRAM in case of 3388 // GET request posted by the READ, WRITE or CAS FSMs. 3389 // - It sends a multi-flit VCI write in case of PUT request posted by 3390 // the XRAM_RSP, WRITE, CAS, or CONFIG FSMs. 3386 3391 // 3387 // - It sends a multi-flit VCI write when the XRAM_RSP FSM, WRITE FSM 3388 // or CAS FSM request to save a dirty line to the XRAM. 3389 // The VCI response is a single flit packet. 3392 // For each client, there is three steps: 3393 // - IXR_CMD_*_IDLE : round-robin allocation to a client 3394 // - IXR_CMD_*_TRT : access to TRT for address and data 3395 // - IXR_CMD_*_SEND : send the PUT or GET VCI command 3396 // 3397 // The address and data to be written (for a PUT) are stored in TRT. 3398 // The trdid field contains always the TRT entry index. 
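As a rough illustration of this new allocation policy, the sketch below shows the rotating-priority rule unrolled in the five IXR_CMD_*_IDLE states: scanning starts just after the client served last, and the first pending request wins, so the client that was just served gets the lowest priority. The function name select_next_client and the client ordering (0=READ, 1=WRITE, 2=CAS, 3=XRAM_RSP, 4=CONFIG) are assumptions made for the sketch and do not appear in the mem-cache sources.

    #include <array>
    #include <cstddef>

    // Illustrative round-robin arbitration over the five request flip-flops.
    // 'req' holds the pending-request flags, 'last' is the client served last.
    // Returns the index of the next client to serve, or -1 if nothing is pending.
    int select_next_client(const std::array<bool, 5>& req, std::size_t last)
    {
        for (std::size_t i = 1; i <= req.size(); ++i)
        {
            std::size_t candidate = (last + i) % req.size();
            if (req[candidate]) return static_cast<int>(candidate);
        }
        return -1;   // no pending request: the FSM stays in the current IDLE state
    }

In the FSM itself this single rule is unrolled into the five IDLE states listed below, one per possible "last served" client.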
3390 3399 //////////////////////////////////////////////////////////////////////// 3400 3401 //std::cout << std::endl << "ixr_cmd_fsm" << std::endl; 3391 3402 3392 3403 switch(r_ixr_cmd_fsm.read()) 3393 3404 { 3394 /////////////////////// /3405 /////////////////////// 3395 3406 case IXR_CMD_READ_IDLE: 3396 3407 { 3397 if (r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3398 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3399 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3400 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3408 if (r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3409 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3410 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3411 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3412 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3401 3413 break; 3402 3414 } … … 3404 3416 case IXR_CMD_WRITE_IDLE: 3405 3417 { 3406 if (r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3407 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3408 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3409 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3418 if (r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3419 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3420 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3421 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3422 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3410 3423 break; 3411 3424 } 3425 ////////////////////// 3426 case IXR_CMD_CAS_IDLE: 3427 { 3428 if (r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3429 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3430 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3431 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3432 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3433 break; 3434 } 3435 /////////////////////// 3436 case IXR_CMD_XRAM_IDLE: 3437 { 3438 if (r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3439 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3440 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3441 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3442 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3443 break; 3444 } 3445 ///////////////////////// 3446 case IXR_CMD_CONFIG_IDLE: 3447 { 3448 if (r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ_TRT; 3449 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE_TRT; 3450 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS_TRT; 3451 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM_TRT; 3452 else if(r_config_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CONFIG_TRT; 3453 break; 3454 } 3455 3456 ////////////////////// 3457 case IXR_CMD_READ_TRT: // access TRT for a GET 3458 { 3459 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3460 { 3461 TransactionTabEntry entry = m_trt.read( r_read_to_ixr_cmd_index.read() ); 3462 r_ixr_cmd_address = entry.nline * (m_words<<2); 3463 r_ixr_cmd_trdid = r_read_to_ixr_cmd_index.read(); 3464 r_ixr_cmd_get = true; 3465 r_ixr_cmd_word = 0; 3466 r_ixr_cmd_fsm = IXR_CMD_READ_SEND; 3467 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3468 3469 #if DEBUG_MEMC_IXR_CMD 3470 if(m_debug) 3471 std::cout 
<< " <MEMC " << name() << " IXR_CMD_READ_TRT> TRT access" 3472 << " index = " << std::dec << r_read_to_ixr_cmd_index.read() 3473 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3474 #endif 3475 } 3476 break; 3477 } 3478 /////////////////////// 3479 case IXR_CMD_WRITE_TRT: // access TRT for a PUT or a GET 3480 { 3481 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3482 { 3483 TransactionTabEntry entry = m_trt.read( r_write_to_ixr_cmd_index.read() ); 3484 r_ixr_cmd_address = entry.nline * (m_words<<2); 3485 r_ixr_cmd_trdid = r_write_to_ixr_cmd_index.read(); 3486 r_ixr_cmd_get = entry.xram_read; 3487 r_ixr_cmd_word = 0; 3488 r_ixr_cmd_fsm = IXR_CMD_WRITE_SEND; 3489 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3490 3491 #if DEBUG_MEMC_IXR_CMD 3492 if(m_debug) 3493 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_TRT> TRT access" 3494 << " index = " << std::dec << r_write_to_ixr_cmd_index.read() 3495 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3496 #endif 3497 } 3498 break; 3499 } 3500 ///////////////////// 3501 case IXR_CMD_CAS_TRT: // access TRT for a PUT or a GET 3502 { 3503 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3504 { 3505 TransactionTabEntry entry = m_trt.read( r_cas_to_ixr_cmd_index.read() ); 3506 r_ixr_cmd_address = entry.nline * (m_words<<2); 3507 r_ixr_cmd_trdid = r_cas_to_ixr_cmd_index.read(); 3508 r_ixr_cmd_get = entry.xram_read; 3509 r_ixr_cmd_word = 0; 3510 r_ixr_cmd_fsm = IXR_CMD_CAS_SEND; 3511 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3512 3513 #if DEBUG_MEMC_IXR_CMD 3514 if(m_debug) 3515 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_TRT> TRT access" 3516 << " index = " << std::dec << r_cas_to_ixr_cmd_index.read() 3517 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3518 #endif 3519 } 3520 break; 3521 } 3522 ////////////////////// 3523 case IXR_CMD_XRAM_TRT: // access TRT for a PUT 3524 { 3525 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3526 { 3527 TransactionTabEntry entry = m_trt.read( r_xram_rsp_to_ixr_cmd_index.read() ); 3528 r_ixr_cmd_address = entry.nline * (m_words<<2); 3529 r_ixr_cmd_trdid = r_xram_rsp_to_ixr_cmd_index.read(); 3530 r_ixr_cmd_get = false; 3531 r_ixr_cmd_word = 0; 3532 r_ixr_cmd_fsm = IXR_CMD_XRAM_SEND; 3533 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = entry.wdata[i]; 3534 3535 #if DEBUG_MEMC_IXR_CMD 3536 if(m_debug) 3537 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_TRT> TRT access" 3538 << " index = " << std::dec << r_xram_rsp_to_ixr_cmd_index.read() 3539 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3540 #endif 3541 } 3542 break; 3543 } 3412 3544 //////////////////////// 3413 case IXR_CMD_CAS_IDLE: 3414 { 3415 if (r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM; 3416 else if(r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ; 3417 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE; 3418 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS; 3419 break; 3545 case IXR_CMD_CONFIG_TRT: // access TRT for a PUT 3546 { 3547 if ( r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_CMD ) 3548 { 3549 TransactionTabEntry entry = m_trt.read( r_config_to_ixr_cmd_index.read() ); 3550 r_ixr_cmd_address = entry.nline * (m_words<<2); 3551 r_ixr_cmd_trdid = r_config_to_ixr_cmd_index.read(); 3552 r_ixr_cmd_get = false; 3553 r_ixr_cmd_word = 0; 3554 r_ixr_cmd_fsm = IXR_CMD_CONFIG_SEND; 3555 for( size_t i=0 ; i<m_words ; i++ ) r_ixr_cmd_wdata[i] = 
entry.wdata[i]; 3556 3557 #if DEBUG_MEMC_IXR_CMD 3558 if(m_debug) 3559 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_TRT> TRT access" 3560 << " index = " << std::dec << r_config_to_ixr_cmd_index.read() 3561 << " / address = " << std::hex << (entry.nline*(m_words<<2)) << std::endl; 3562 #endif 3563 } 3564 break; 3565 } 3566 3567 /////////////////////// 3568 case IXR_CMD_READ_SEND: // send a get from READ FSM 3569 { 3570 if(p_vci_ixr.cmdack) 3571 { 3572 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE; 3573 r_read_to_ixr_cmd_req = false; 3574 3575 #if DEBUG_MEMC_IXR_CMD 3576 if(m_debug) 3577 std::cout << " <MEMC " << name() << " IXR_CMD_READ_SEND> GET request:" << std::hex 3578 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3579 #endif 3580 } 3581 break; 3420 3582 } 3421 3583 //////////////////////// 3422 case IXR_CMD_ XRAM_IDLE:3423 { 3424 if (r_read_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_READ;3425 else if(r_write_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_WRITE;3426 else if(r_cas_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_CAS;3427 else if(r_xram_rsp_to_ixr_cmd_req) r_ixr_cmd_fsm = IXR_CMD_XRAM;3428 break;3429 }3430 ////////////////// // send a get from READ FSM3431 case IXR_CMD_READ:3432 {3433 if(p_vci_ixr.cmdack)3434 {3435 r_ixr_cmd_fsm = IXR_CMD_READ_IDLE;3436 r_read_to_ixr_cmd_req = false;3584 case IXR_CMD_WRITE_SEND: // send a put or get from WRITE FSM 3585 { 3586 if(p_vci_ixr.cmdack) 3587 { 3588 if(r_write_to_ixr_cmd_put.read()) // PUT 3589 { 3590 if(r_ixr_cmd_word.read() == (m_words - 2)) 3591 { 3592 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3593 r_write_to_ixr_cmd_req = false; 3594 } 3595 else 3596 { 3597 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3598 } 3437 3599 3438 3600 #if DEBUG_MEMC_IXR_CMD 3439 3601 if(m_debug) 3440 std::cout << " <MEMC " << name() << " IXR_CMD_READ>" 3441 << " Send a get request to xram / address = " << std::hex 3442 << (addr_t)(r_read_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3443 #endif 3444 } 3445 break; 3446 } 3447 /////////////////// 3448 case IXR_CMD_WRITE: // send a put or get from WRITE FSM 3449 { 3450 if(p_vci_ixr.cmdack) 3451 { 3452 if(r_write_to_ixr_cmd_write.read()) // PUT 3453 { 3454 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3455 { 3456 r_ixr_cmd_cpt = 0; 3457 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3458 r_write_to_ixr_cmd_req = false; 3459 } 3460 else 3461 { 3462 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3463 } 3602 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> PUT request:" << std::hex 3603 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3604 #endif 3605 } 3606 else // GET 3607 { 3608 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3609 r_write_to_ixr_cmd_req = false; 3464 3610 3465 3611 #if DEBUG_MEMC_IXR_CMD 3466 3612 if(m_debug) 3467 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE>" 3468 << " Send a put request to xram / address = " << std::hex 3469 << (addr_t)((r_write_to_ixr_cmd_nline.read() * m_words + 3470 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3471 #endif 3472 } 3473 else // GET 3474 { 3475 r_ixr_cmd_fsm = IXR_CMD_WRITE_IDLE; 3476 r_write_to_ixr_cmd_req = false; 3613 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE_SEND> GET request:" << std::hex 3614 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3615 #endif 3616 } 3617 } 3618 break; 3619 } 3620 ////////////////////// 3621 case IXR_CMD_CAS_SEND: // send a put or get command from CAS FSM 3622 { 3623 if(p_vci_ixr.cmdack) 3624 { 3625 if(r_cas_to_ixr_cmd_put.read()) // PUT 3626 { 
3627 if(r_ixr_cmd_word.read() == (m_words - 2)) 3628 { 3629 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3630 r_cas_to_ixr_cmd_req = false; 3631 } 3632 else 3633 { 3634 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3635 } 3477 3636 3478 3637 #if DEBUG_MEMC_IXR_CMD 3479 3638 if(m_debug) 3480 std::cout << " <MEMC " << name() << " IXR_CMD_WRITE>" 3481 << " Send a get request to xram / address = " << std::hex 3482 << (addr_t)(r_write_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3483 #endif 3484 } 3485 } 3486 break; 3487 } 3488 ///////////////// 3489 case IXR_CMD_CAS: // send a put or get command from CAS FSM 3490 { 3491 if(p_vci_ixr.cmdack) 3492 { 3493 if(r_cas_to_ixr_cmd_write.read()) // PUT 3494 { 3495 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3496 { 3497 r_ixr_cmd_cpt = 0; 3498 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3499 r_cas_to_ixr_cmd_req = false; 3500 } 3501 else 3502 { 3503 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3504 } 3639 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> PUT request:" << std::hex 3640 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3641 #endif 3642 } 3643 else // GET 3644 { 3645 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3646 r_cas_to_ixr_cmd_req = false; 3505 3647 3506 3648 #if DEBUG_MEMC_IXR_CMD 3507 3649 if(m_debug) 3508 std::cout << " <MEMC " << name() << " IXR_CMD_CAS>" 3509 << " Send a put request to xram / address = " << std::hex 3510 << (addr_t)( (r_cas_to_ixr_cmd_nline.read() * m_words + 3511 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3512 #endif 3513 } 3514 else // GET 3515 { 3516 r_ixr_cmd_fsm = IXR_CMD_CAS_IDLE; 3517 r_cas_to_ixr_cmd_req = false; 3650 std::cout << " <MEMC " << name() << " IXR_CMD_CAS_SEND> GET request:" << std::hex 3651 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3652 #endif 3653 } 3654 } 3655 break; 3656 } 3657 /////////////////////// 3658 case IXR_CMD_XRAM_SEND: // send a put from XRAM_RSP FSM 3659 { 3660 if(p_vci_ixr.cmdack) 3661 { 3662 if(r_ixr_cmd_word.read() == (m_words - 2)) 3663 { 3664 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3665 r_xram_rsp_to_ixr_cmd_req = false; 3666 } 3667 else 3668 { 3669 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3670 } 3518 3671 3519 3672 #if DEBUG_MEMC_IXR_CMD 3520 3673 if(m_debug) 3521 std::cout << " <MEMC " << name() << " IXR_CMD_CAS>" 3522 << " Send a get request to xram / address = " << std::hex 3523 << (addr_t)(r_cas_to_ixr_cmd_nline.read()*m_words*4) << std::endl; 3524 #endif 3525 } 3526 } 3527 break; 3528 } 3529 ////////////////// 3530 case IXR_CMD_XRAM: // send a put from XRAM_RSP FSM 3531 { 3532 if(p_vci_ixr.cmdack) 3533 { 3534 if(r_ixr_cmd_cpt.read() == (m_words - 2)) 3535 { 3536 r_ixr_cmd_cpt = 0; 3537 r_ixr_cmd_fsm = IXR_CMD_XRAM_IDLE; 3538 r_xram_rsp_to_ixr_cmd_req = false; 3539 } 3540 else 3541 { 3542 r_ixr_cmd_cpt = r_ixr_cmd_cpt + 2; 3543 } 3674 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM_SEND> PUT request:" << std::hex 3675 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3676 #endif 3677 } 3678 break; 3679 } 3680 ///////////////////////// 3681 case IXR_CMD_CONFIG_SEND: // send a put from CONFIG FSM 3682 { 3683 if(p_vci_ixr.cmdack) 3684 { 3685 if(r_ixr_cmd_word.read() == (m_words - 2)) 3686 { 3687 r_ixr_cmd_fsm = IXR_CMD_CONFIG_IDLE; 3688 r_config_to_ixr_cmd_req = false; 3689 } 3690 else 3691 { 3692 r_ixr_cmd_word = r_ixr_cmd_word.read() + 2; 3693 } 3544 3694 3545 3695 #if DEBUG_MEMC_IXR_CMD 3546 3696 if(m_debug) 3547 std::cout << " <MEMC " << name() << " IXR_CMD_XRAM>" 3548 << " Send a 
put request to xram / address = " << std::hex 3549 << (addr_t)( (r_xram_rsp_to_ixr_cmd_nline.read() * m_words + 3550 r_ixr_cmd_cpt.read()) * 4 ) << std::endl; 3551 #endif 3552 } 3553 break; 3554 } 3555 3697 std::cout << " <MEMC " << name() << " IXR_CMD_CONFIG_SEND> PUT request:" << std::hex 3698 << " address = " << r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2) << std::endl; 3699 #endif 3700 } 3701 break; 3702 } 3556 3703 } // end switch r_ixr_cmd_fsm 3557 3704 … … 3560 3707 //////////////////////////////////////////////////////////////////////////// 3561 3708 // The IXR_RSP FSM receives the response packets from the XRAM, 3562 // for both put transaction, and gettransaction.3709 // for both PUT transaction, and GET transaction. 3563 3710 // 3564 // - A response to a putrequest is a single-cell VCI packet.3565 // The T ransaction Tabindex is contained in the RTRDID field.3711 // - A response to a PUT request is a single-cell VCI packet. 3712 // The TRT index is contained in the RTRDID field. 3566 3713 // The FSM takes the lock protecting the TRT, and the corresponding 3567 // entry is erased. 3714 // entry is erased. If an acknowledge was required (in case of software SYNC) 3715 // the r_config_rsp_lines counter is decremented. 3568 3716 // 3569 // - A response to a getrequest is a multi-cell VCI packet.3570 // The T ransaction Tabindex is contained in the RTRDID field.3717 // - A response to a GET request is a multi-cell VCI packet. 3718 // The TRT index is contained in the RTRDID field. 3571 3719 // The N cells contain the N words of the cache line in the RDATA field. 3572 3720 // The FSM takes the lock protecting the TRT to store the line in the TRT 3573 3721 // (taking into account the write requests already stored in the TRT). 3574 // When the line is completely written, the corresponding rok signal is set. 3722 // When the line is completely written, the r_ixr_rsp_to_xram_rsp_rok[index] 3723 // signal is set to inform the XRAM_RSP FSM. 
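The GET path can be summarised by a small stand-alone sketch: each 64-bit RDATA cell carries two 32-bit words of the cache line, so the word index of the TRT entry advances by 2 per cell until the whole line has been stored. The helper store_get_flit below is hypothetical, and the mapping of the low half of the flit to the even word is an assumption of the sketch; it only approximates what m_trt.write_rsp() does with the data.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Store one 64-bit GET response flit into the line buffered in the TRT entry.
    // 'line' holds the m_words 32-bit words, 'word' is the current word index.
    void store_get_flit(std::vector<std::uint32_t>& line, std::size_t& word, std::uint64_t rdata)
    {
        line[word]     = static_cast<std::uint32_t>(rdata & 0xFFFFFFFF); // low half (assumed even word)
        line[word + 1] = static_cast<std::uint32_t>(rdata >> 32);        // high half
        word += 2;   // next flit fills the next pair of words
    }

When the last pair of words is written (reop asserted), the corresponding r_ixr_rsp_to_xram_rsp_rok flip-flop is raised to wake up the XRAM_RSP FSM.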
3575 3724 /////////////////////////////////////////////////////////////////////////////// 3725 3726 //std::cout << std::endl << "ixr_rsp_fsm" << std::endl; 3576 3727 3577 3728 switch(r_ixr_rsp_fsm.read()) 3578 3729 { 3579 ////////////////// 3580 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3581 { 3582 if(p_vci_ixr.rspval.read()) 3583 { 3584 r_ixr_rsp_cpt = 0; 3585 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3586 if(p_vci_ixr.reop.read() and !(p_vci_ixr.rerror.read() &0x1)) // PUT transaction 3587 { 3588 r_ixr_rsp_fsm = IXR_RSP_ACK; 3730 ////////////////// 3731 case IXR_RSP_IDLE: // test transaction type: PUT/GET 3732 { 3733 if(p_vci_ixr.rspval.read()) 3734 { 3735 r_ixr_rsp_cpt = 0; 3736 r_ixr_rsp_trt_index = p_vci_ixr.rtrdid.read(); 3737 3738 assert( ((p_vci_ixr.rerror.read() & 0x1) == 0) and 3739 "MEMC ERROR in IXR_RSP state: XRAM response error !"); 3740 3741 if(p_vci_ixr.reop.read()) // PUT 3742 { 3743 r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3589 3744 3590 3745 #if DEBUG_MEMC_IXR_RSP … … 3593 3748 << " IXR_RSP_IDLE> Response from XRAM to a put transaction" << std::endl; 3594 3749 #endif 3595 }3596 else // GET transaction3597 {3598 r_ixr_rsp_fsm = IXR_RSP_TRT_READ;3750 } 3751 else // GET 3752 { 3753 r_ixr_rsp_fsm = IXR_RSP_TRT_READ; 3599 3754 3600 3755 #if DEBUG_MEMC_IXR_RSP … … 3603 3758 << " IXR_RSP_IDLE> Response from XRAM to a get transaction" << std::endl; 3604 3759 #endif 3605 } 3606 } 3607 break; 3608 } 3609 ///////////////// 3610 case IXR_RSP_ACK: // Aknowledge the VCI response for a PUT 3611 { 3612 if(p_vci_ixr.rspval.read()) r_ixr_rsp_fsm = IXR_RSP_TRT_ERASE; 3613 3614 #if DEBUG_MEMC_IXR_RSP 3615 if(m_debug) 3616 std::cout << " <MEMC " << name() << " IXR_RSP_ACK>" << std::endl; 3617 #endif 3618 break; 3619 } 3620 //////////////////////// 3621 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3622 { 3623 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3624 { 3625 m_trt.erase(r_ixr_rsp_trt_index.read()); 3626 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3760 } 3761 } 3762 break; 3763 } 3764 //////////////////////// 3765 case IXR_RSP_TRT_ERASE: // erase the entry in the TRT 3766 // decrease the line counter if config request 3767 { 3768 if(r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) 3769 { 3770 size_t index = r_ixr_rsp_trt_index.read(); 3771 if (m_trt.is_config(index) ) r_config_rsp_lines = r_config_rsp_lines.read() - 1; 3772 m_trt.erase(index); 3773 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3627 3774 3628 3775 #if DEBUG_MEMC_IXR_RSP … … 3631 3778 << r_ixr_rsp_trt_index.read() << std::endl; 3632 3779 #endif 3633 }3634 break;3635 }3636 //////////////////////3637 case IXR_RSP_TRT_READ: // write a 64 bits data in theTRT3638 {3639 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and p_vci_ixr.rspval)3640 {3641 size_t index = r_ixr_rsp_trt_index.read();3642 bool eop = p_vci_ixr.reop.read();3643 wide_data_t data = p_vci_ixr.rdata.read();3644 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1);3645 3646 assert(((eop == (r_ixr_rsp_cpt.read() == (m_words-2))) or p_vci_ixr.rerror.read()) 3647 and "Error in VCI_MEM_CACHE : invalid length for a response from XRAM");3648 3649 m_trt.write_rsp( index, 3650 r_ixr_rsp_cpt.read(),3651 data,3652 error);3653 3654 r_ixr_rsp_cpt = r_ixr_rsp_cpt.read()+ 2;3655 3656 if(eop)3657 {3658 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()]=true;3659 r_ixr_rsp_fsm = IXR_RSP_IDLE;3660 }3780 } 3781 break; 3782 } 3783 ////////////////////// 3784 case IXR_RSP_TRT_READ: // write a 64 bits data word in TRT 3785 { 3786 if((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and 
p_vci_ixr.rspval) 3787 { 3788 size_t index = r_ixr_rsp_trt_index.read(); 3789 size_t word = r_ixr_rsp_cpt.read(); 3790 bool eop = p_vci_ixr.reop.read(); 3791 wide_data_t data = p_vci_ixr.rdata.read(); 3792 bool error = ((p_vci_ixr.rerror.read() & 0x1) == 1); 3793 3794 assert(((eop == (word == (m_words-2))) or error) and 3795 "MEMC ERROR in IXR_RSP_TRT_READ state : invalid response from XRAM"); 3796 3797 m_trt.write_rsp( index, 3798 word, 3799 data ); 3800 3801 r_ixr_rsp_cpt = word + 2; 3802 3803 if( eop ) 3804 { 3805 r_ixr_rsp_to_xram_rsp_rok[r_ixr_rsp_trt_index.read()] = true; 3806 r_ixr_rsp_fsm = IXR_RSP_IDLE; 3807 } 3661 3808 3662 3809 #if DEBUG_MEMC_IXR_RSP 3663 3810 if(m_debug) 3664 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing a wordin TRT : "3811 std::cout << " <MEMC " << name() << " IXR_RSP_TRT_READ> Writing 2 words in TRT : " 3665 3812 << " index = " << std::dec << index 3666 << " / word = " << r_ixr_rsp_cpt.read()3813 << " / word = " << word 3667 3814 << " / data = " << std::hex << data << std::endl; 3668 3815 #endif 3669 }3670 break;3671 }3816 } 3817 break; 3818 } 3672 3819 } // end swich r_ixr_rsp_fsm 3673 3820 … … 3675 3822 // XRAM_RSP FSM 3676 3823 //////////////////////////////////////////////////////////////////////////// 3677 // The XRAM_RSP FSM handles the incoming cache lines from the XRAM.3824 // The XRAM_RSP FSM handles the incoming cache lines after an XRAM GET. 3678 3825 // The cache line has been written in the TRT by the IXR_CMD_FSM. 3679 3826 // As the IXR_RSP FSM and the XRAM_RSP FSM are running in parallel, 3680 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] 3681 // as the number of entries in the TRT, that are handled with 3682 // a round-robin priority... 3827 // there is as many flip-flops r_ixr_rsp_to_xram_rsp_rok[i] as the number 3828 // of entries in the TRT, that are handled with a round-robin priority... 3683 3829 // 3684 // When a response is available, the corresponding TRT entry 3685 // is copied in a local buffer to be written in the cache. 3686 // The FSM takes the lock protecting the TRT, and the lock protecting the DIR. 3687 // It selects a cache slot and writes the line in the cache. 3830 // The FSM takes the lock protecting TRT, and the lock protecting DIR. 3831 // The selected TRT entry is copied in the local buffer r_xram_rsp_trt_buf. 3832 // It selects a cache slot and save the victim line in another local buffer 3833 // r_xram_rsp_victim_***. 3834 // It writes the line extracted from TRT in the cache. 3688 3835 // If it was a read MISS, the XRAM_RSP FSM send a request to the TGT_RSP 3689 3836 // FSM to return the cache line to the registered processor. 
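Two independent decisions are taken for the victim line saved in that buffer: it needs a coherence invalidation if it is valid and still has registered copies, and it needs a write-back (PUT) to XRAM if it is dirty. The helpers below merely paraphrase those two conditions; the Victim structure is a stand-in for the r_xram_rsp_victim_* registers, not a type from the sources.

    #include <cstddef>

    // Minimal stand-in for the victim-line state latched by XRAM_RSP_TRT_COPY.
    struct Victim
    {
        bool        valid;   // the replaced slot held a valid line
        bool        dirty;   // the line was modified in the mem-cache
        std::size_t count;   // number of L1 copies recorded in DIR / HEAP
    };

    // Victim still has owners: an inval (multicast or broadcast) must be sent.
    bool needs_inval(const Victim& v)     { return v.valid and (v.count > 0); }

    // Victim was modified: a PUT transaction to XRAM must be registered in TRT.
    bool needs_writeback(const Victim& v) { return v.valid and v.dirty; }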
… … 3695 3842 /////////////////////////////////////////////////////////////////////////////// 3696 3843 3844 //std::cout << std::endl << "xram_rsp_fsm" << std::endl; 3845 3697 3846 switch(r_xram_rsp_fsm.read()) 3698 3847 { … … 3700 3849 case XRAM_RSP_IDLE: // scan the XRAM responses / select a TRT index (round robin) 3701 3850 { 3702 size_t ptr= r_xram_rsp_trt_index.read();3703 size_t lines = m_trt_lines;3704 for(size_t i=0 ; i<lines ; i++)3705 {3706 size_t index = (i+ptr+1) %lines;3707 if(r_ixr_rsp_to_xram_rsp_rok[index])3708 {3709 r_xram_rsp_trt_index = index;3710 r_ixr_rsp_to_xram_rsp_rok[index] = false;3711 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK;3851 size_t old = r_xram_rsp_trt_index.read(); 3852 size_t lines = m_trt_lines; 3853 for(size_t i=0 ; i<lines ; i++) 3854 { 3855 size_t index = (i+old+1) %lines; 3856 if(r_ixr_rsp_to_xram_rsp_rok[index]) 3857 { 3858 r_xram_rsp_trt_index = index; 3859 r_ixr_rsp_to_xram_rsp_rok[index] = false; 3860 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 3712 3861 3713 3862 #if DEBUG_MEMC_XRAM_RSP … … 3717 3866 << " index = " << std::dec << index << std::endl; 3718 3867 #endif 3719 break;3720 }3721 }3722 break;3868 break; 3869 } 3870 } 3871 break; 3723 3872 } 3724 3873 /////////////////////// … … 3726 3875 // Copy the TRT entry in a local buffer 3727 3876 { 3728 if((r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3729 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP)) 3730 { 3731 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 3732 size_t index = r_xram_rsp_trt_index.read(); 3733 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 3734 3735 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 3877 if( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3878 (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) ) 3879 { 3880 // copy the TRT entry in the r_xram_rsp_trt_buf local buffer 3881 size_t index = r_xram_rsp_trt_index.read(); 3882 r_xram_rsp_trt_buf.copy( m_trt.read(index) ); 3883 r_xram_rsp_fsm = XRAM_RSP_TRT_COPY; 3736 3884 3737 3885 #if DEBUG_MEMC_XRAM_RSP … … 3740 3888 << " Get access to DIR and TRT" << std::endl; 3741 3889 #endif 3742 }3743 break;3890 } 3891 break; 3744 3892 } 3745 3893 /////////////////////// … … 3747 3895 // and copy it in a local buffer 3748 3896 { 3749 if ( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3750 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) ) 3751 { 3897 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3898 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad DIR allocation"); 3899 3900 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3901 "MEMC ERROR in XRAM_RSP_TRT_COPY state: Bad TRT allocation"); 3902 3752 3903 // selects & extracts a victim line from cache 3753 3904 size_t way = 0; … … 3758 3909 bool inval = (victim.count and victim.valid) ; 3759 3910 3760 // copy the victim line in a local buffer 3911 // copy the victim line in a local buffer (both data dir) 3761 3912 m_cache_data.read_line(way, set, r_xram_rsp_victim_data); 3762 3913 3763 3914 r_xram_rsp_victim_copy = victim.owner.srcid; 3764 3765 #if L1_MULTI_CACHE3766 r_xram_rsp_victim_copy_cache= victim.owner.cache_id;3767 #endif3768 3915 r_xram_rsp_victim_copy_inst = victim.owner.inst; 3769 3916 r_xram_rsp_victim_count = victim.count; … … 3776 3923 r_xram_rsp_victim_dirty = victim.dirty; 3777 3924 3778 if(!r_xram_rsp_trt_buf.rerror) 3779 { 3780 r_xram_rsp_fsm = XRAM_RSP_INVAL_LOCK; 3781 } 3782 else 3783 { 3784 r_xram_rsp_fsm = XRAM_RSP_ERROR_ERASE; 3785 } 3925 if( not r_xram_rsp_trt_buf.rerror ) r_xram_rsp_fsm = XRAM_RSP_IVT_LOCK; 3926 else r_xram_rsp_fsm = 
XRAM_RSP_ERROR_ERASE; 3786 3927 3787 3928 #if DEBUG_MEMC_XRAM_RSP 3788 3929 if(m_debug) 3789 3930 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_COPY>" 3790 << " Select a slot: "3931 << " Select a victim slot: " 3791 3932 << " way = " << std::dec << way 3792 3933 << " / set = " << set 3793 3934 << " / inval_required = " << inval << std::endl; 3794 3935 #endif 3795 }3796 else3797 {3798 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_TRT_COPY"3799 << " bad TRT or DIR allocation" << std::endl;3800 exit(0);3801 }3802 break;3803 } 3804 /////////////////////////3805 case XRAM_RSP_INVAL_LOCK: // Take the IVT lock to check a possible pending inval3806 { 3807 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP)3808 {3809 size_t index = 0;3810 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval3811 {3812 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;3936 break; 3937 } 3938 /////////////////////// 3939 case XRAM_RSP_IVT_LOCK: // Keep DIR and TRT locks and take the IVT lock 3940 // to check a possible pending inval 3941 { 3942 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 3943 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad DIR allocation"); 3944 3945 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 3946 "MEMC ERROR in XRAM_RSP_IVT_LOCK state: Bad TRT allocation"); 3947 3948 if(r_alloc_ivt_fsm == ALLOC_IVT_XRAM_RSP) 3949 { 3950 size_t index = 0; 3951 if(m_ivt.search_inval(r_xram_rsp_trt_buf.nline, index)) // pending inval 3952 { 3953 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 3813 3954 3814 3955 #if DEBUG_MEMC_XRAM_RSP 3815 3956 if(m_debug) 3816 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"3957 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3817 3958 << " Get acces to IVT, but line invalidation registered" 3818 << " / nline = " << std::hex << r_xram_rsp_trt_buf.nline3959 << " / address = " << std::hex << r_xram_rsp_trt_buf.nline*m_words*4 3819 3960 << " / index = " << std::dec << index << std::endl; 3820 3961 #endif 3821 3962 3822 }3823 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full3824 {3825 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT;3963 } 3964 else if(m_ivt.is_full() and r_xram_rsp_victim_inval.read()) // IVT full 3965 { 3966 r_xram_rsp_fsm = XRAM_RSP_INVAL_WAIT; 3826 3967 3827 3968 #if DEBUG_MEMC_XRAM_RSP 3828 3969 if(m_debug) 3829 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"3970 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3830 3971 << " Get acces to IVT, but inval required and IVT full" << std::endl; 3831 3972 #endif 3832 }3833 else3834 {3835 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT;3973 } 3974 else 3975 { 3976 r_xram_rsp_fsm = XRAM_RSP_DIR_UPDT; 3836 3977 3837 3978 #if DEBUG_MEMC_XRAM_RSP 3838 3979 if(m_debug) 3839 std::cout << " <MEMC " << name() << " XRAM_RSP_I NVAL_LOCK>"3840 << " Get acces to IVT " << std::endl;3841 #endif 3842 }3843 }3844 break;3980 std::cout << " <MEMC " << name() << " XRAM_RSP_IVT_LOCK>" 3981 << " Get acces to IVT / no pending inval request" << std::endl; 3982 #endif 3983 } 3984 } 3985 break; 3845 3986 } 3846 3987 ///////////////////////// … … 3853 3994 << " Release all locks and retry" << std::endl; 3854 3995 #endif 3855 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK;3856 break;3996 r_xram_rsp_fsm = XRAM_RSP_DIR_LOCK; 3997 break; 3857 3998 } 3858 3999 /////////////////////// 3859 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory) 3860 // and possibly set an inval request in IVT 3861 { 3862 // check if this is an instruction read, this means pktid is either 3863 // 
TYPE_READ_INS_UNC 0bX010 with TSAR encoding 3864 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 3865 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 3866 3867 // check if this is a cached read, this means pktid is either 3868 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 3869 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 3870 bool cached_read = (r_xram_rsp_trt_buf.pktid & 0x1) and r_xram_rsp_trt_buf.proc_read; 3871 3872 bool dirty = false; 3873 3874 // update cache data 3875 size_t set = r_xram_rsp_victim_set.read(); 3876 size_t way = r_xram_rsp_victim_way.read(); 3877 for(size_t word=0; word<m_words ; word++) 3878 { 3879 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 3880 3881 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 3882 3883 if(m_monitor_ok) 3884 { 3885 addr_t address = r_xram_rsp_trt_buf.nline<<6 | word<<2; 3886 check_monitor( address, r_xram_rsp_trt_buf.wdata[word], false); 3887 } 3888 } 3889 3890 // update cache directory 3891 DirectoryEntry entry; 3892 entry.valid = true; 3893 entry.is_cnt = false; 3894 entry.lock = false; 3895 entry.dirty = dirty; 3896 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 3897 entry.ptr = 0; 3898 if(cached_read) 3899 { 3900 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 3901 #if L1_MULTI_CACHE 3902 entry.owner.cache_id= r_xram_rsp_trt_buf.pktid; 3903 #endif 3904 entry.owner.inst = inst_read; 3905 entry.count = 1; 3906 } 3907 else 3908 { 3909 entry.owner.srcid = 0; 3910 #if L1_MULTI_CACHE 3911 entry.owner.cache_id = 0; 3912 #endif 3913 entry.owner.inst = 0; 3914 entry.count = 0; 3915 } 3916 m_cache_directory.write(set, way, entry); 3917 3918 // request an invalidattion request in IVT for victim line 3919 if(r_xram_rsp_victim_inval.read()) 3920 { 3921 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 3922 size_t index = 0; 3923 size_t count_copies = r_xram_rsp_victim_count.read(); 3924 3925 bool wok = m_ivt.set(false, // it's an inval transaction 3926 broadcast, // set broadcast bit 3927 false, // no response required 3928 false, // no acknowledge required 3929 0, // srcid 3930 0, // trdid 3931 0, // pktid 3932 r_xram_rsp_victim_nline.read(), 3933 count_copies, 3934 index); 3935 3936 r_xram_rsp_ivt_index = index; 3937 3938 if(!wok) 3939 { 3940 std::cout << "VCI_MEM_CACHE ERROR " << name() << " XRAM_RSP_DIR_UPDT" 3941 << " invalidate_tab entry free but write unsuccessful" << std::endl; 3942 exit(0); 3943 } 3944 } 4000 case XRAM_RSP_DIR_UPDT: // updates the cache (both data & directory), 4001 // erases the TRT entry if victim not dirty, 4002 // and set inval request in IVT if required 4003 { 4004 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP) and 4005 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad DIR allocation"); 4006 4007 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) and 4008 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad TRT allocation"); 4009 4010 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_XRAM_RSP) and 4011 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: Bad IVT allocation"); 4012 4013 // check if this is an instruction read, this means pktid is either 4014 // TYPE_READ_INS_UNC 0bX010 with TSAR encoding 4015 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4016 bool inst_read = (r_xram_rsp_trt_buf.pktid & 0x2) and r_xram_rsp_trt_buf.proc_read; 4017 4018 // check if this is a cached read, this means pktid is either 4019 // TYPE_READ_DATA_MISS 0bX001 with TSAR encoding 4020 // TYPE_READ_INS_MISS 0bX011 with TSAR encoding 4021 bool cached_read = (r_xram_rsp_trt_buf.pktid & 
0x1) and r_xram_rsp_trt_buf.proc_read; 4022 4023 bool dirty = false; 4024 4025 // update cache data 4026 size_t set = r_xram_rsp_victim_set.read(); 4027 size_t way = r_xram_rsp_victim_way.read(); 4028 4029 for(size_t word=0; word<m_words ; word++) 4030 { 4031 m_cache_data.write(way, set, word, r_xram_rsp_trt_buf.wdata[word]); 4032 dirty = dirty or (r_xram_rsp_trt_buf.wdata_be[word] != 0); 4033 } 4034 4035 // update cache directory 4036 DirectoryEntry entry; 4037 entry.valid = true; 4038 entry.is_cnt = false; 4039 entry.lock = false; 4040 entry.dirty = dirty; 4041 entry.tag = r_xram_rsp_trt_buf.nline / m_sets; 4042 entry.ptr = 0; 4043 if(cached_read) 4044 { 4045 entry.owner.srcid = r_xram_rsp_trt_buf.srcid; 4046 entry.owner.inst = inst_read; 4047 entry.count = 1; 4048 } 4049 else 4050 { 4051 entry.owner.srcid = 0; 4052 entry.owner.inst = 0; 4053 entry.count = 0; 4054 } 4055 m_cache_directory.write(set, way, entry); 4056 4057 // register invalid request in IVT for victim line if required 4058 if(r_xram_rsp_victim_inval.read()) 4059 { 4060 bool broadcast = r_xram_rsp_victim_is_cnt.read(); 4061 size_t index = 0; 4062 size_t count_copies = r_xram_rsp_victim_count.read(); 4063 4064 bool wok = m_ivt.set(false, // it's an inval transaction 4065 broadcast, // set broadcast bit 4066 false, // no response required 4067 false, // no acknowledge required 4068 0, // srcid 4069 0, // trdid 4070 0, // pktid 4071 r_xram_rsp_victim_nline.read(), 4072 count_copies, 4073 index); 4074 4075 r_xram_rsp_ivt_index = index; 4076 4077 assert( wok and 4078 "MEMC ERROR in XRAM_RSP_DIR_UPDT state: IVT should not be full"); 4079 } 3945 4080 3946 4081 #if DEBUG_MEMC_XRAM_RSP … … 3956 4091 << " / is_cnt = " << entry.is_cnt << std::endl; 3957 4092 if(r_xram_rsp_victim_inval.read()) 3958 std::cout << " Invalidation request for victim line"3959 << std::hex << r_xram_rsp_victim_nline.read() 4093 std::cout << " Invalidation request for address " 4094 << std::hex << r_xram_rsp_victim_nline.read()*m_words*4 3960 4095 << " / broadcast = " << r_xram_rsp_victim_is_cnt.read() << std::endl; 3961 4096 } 3962 4097 #endif 3963 4098 3964 // If the victim is not dirty, we don't need another XRAM put transaction, 3965 // and we can erase the TRT entry 3966 if(!r_xram_rsp_victim_dirty.read()) m_trt.erase(r_xram_rsp_trt_index.read()); 3967 3968 // Next state 3969 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 3970 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 3971 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 3972 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 3973 break; 4099 // If the victim is not dirty, we don't need to reuse the TRT entry for 4100 // another PUT transaction, and we can erase the TRT entry 4101 if( not r_xram_rsp_victim_dirty.read() ) 4102 { 4103 m_trt.erase(r_xram_rsp_trt_index.read()); 4104 } 4105 4106 // Next state 4107 if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_TRT_DIRTY; 4108 else if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4109 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4110 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4111 break; 3974 4112 } 3975 4113 //////////////////////// 3976 4114 case XRAM_RSP_TRT_DIRTY: // set the TRT entry (PUT to XRAM) if the victim is dirty 3977 4115 { 3978 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 3979 { 3980 m_trt.set(r_xram_rsp_trt_index.read(), 3981 false, // write to XRAM 3982 r_xram_rsp_victim_nline.read(), // line index 3983 0, 3984 0, 3985 0, 
3986 false, 3987 0, 3988 0, 3989 std::vector<be_t> (m_words,0), 3990 std::vector<data_t> (m_words,0)); 4116 if(r_alloc_trt_fsm.read() == ALLOC_TRT_XRAM_RSP) 4117 { 4118 std::vector<data_t> data_vector; 4119 data_vector.clear(); 4120 for(size_t i=0; i<m_words; i++) 4121 { 4122 data_vector.push_back(r_xram_rsp_victim_data[i].read()); 4123 } 4124 m_trt.set( r_xram_rsp_trt_index.read(), 4125 false, // PUT 4126 r_xram_rsp_victim_nline.read(), // line index 4127 0, // unused 4128 0, // unused 4129 0, // unused 4130 false, // not proc_read 4131 0, // unused 4132 0, // unused 4133 std::vector<be_t>(m_words,0xF), 4134 data_vector); 3991 4135 3992 4136 #if DEBUG_MEMC_XRAM_RSP … … 3994 4138 std::cout << " <MEMC " << name() << " XRAM_RSP_TRT_DIRTY>" 3995 4139 << " Set TRT entry for the put transaction" 3996 << " / dirty victim line = " << r_xram_rsp_victim_nline.read() << std::endl;3997 #endif 3998 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP;3999 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL;4000 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;4001 }4002 break;4140 << " / address = " << (r_xram_rsp_victim_nline.read()*m_words*4) << std::endl; 4141 #endif 4142 if(r_xram_rsp_trt_buf.proc_read) r_xram_rsp_fsm = XRAM_RSP_DIR_RSP; 4143 else if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4144 else r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4145 } 4146 break; 4003 4147 } 4004 4148 ////////////////////// 4005 4149 case XRAM_RSP_DIR_RSP: // Request a response to TGT_RSP FSM 4006 4150 { 4007 if(!r_xram_rsp_to_tgt_rsp_req.read())4008 {4009 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid;4010 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid;4011 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid;4012 for(size_t i=0; i < m_words; i++)4013 {4014 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i];4015 }4016 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index;4017 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length;4018 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key;4019 r_xram_rsp_to_tgt_rsp_rerror = false;4020 r_xram_rsp_to_tgt_rsp_req = true;4021 4022 if(r_xram_rsp_victim_inval) r_xram_rsp_fsm = XRAM_RSP_INVAL;4023 else if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY;4024 elser_xram_rsp_fsm = XRAM_RSP_IDLE;4151 if ( not r_xram_rsp_to_tgt_rsp_req.read() ) 4152 { 4153 r_xram_rsp_to_tgt_rsp_srcid = r_xram_rsp_trt_buf.srcid; 4154 r_xram_rsp_to_tgt_rsp_trdid = r_xram_rsp_trt_buf.trdid; 4155 r_xram_rsp_to_tgt_rsp_pktid = r_xram_rsp_trt_buf.pktid; 4156 for(size_t i=0; i < m_words; i++) 4157 { 4158 r_xram_rsp_to_tgt_rsp_data[i] = r_xram_rsp_trt_buf.wdata[i]; 4159 } 4160 r_xram_rsp_to_tgt_rsp_word = r_xram_rsp_trt_buf.word_index; 4161 r_xram_rsp_to_tgt_rsp_length = r_xram_rsp_trt_buf.read_length; 4162 r_xram_rsp_to_tgt_rsp_ll_key = r_xram_rsp_trt_buf.ll_key; 4163 r_xram_rsp_to_tgt_rsp_rerror = false; 4164 r_xram_rsp_to_tgt_rsp_req = true; 4165 4166 if(r_xram_rsp_victim_inval.read()) r_xram_rsp_fsm = XRAM_RSP_INVAL; 4167 else if(r_xram_rsp_victim_dirty.read()) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; 4168 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4025 4169 4026 4170 #if DEBUG_MEMC_XRAM_RSP … … 4032 4176 << " / nwords = " << std::dec << r_xram_rsp_trt_buf.read_length << std::endl; 4033 4177 #endif 4034 }4035 break;4178 } 4179 break; 4036 4180 } 4037 4181 //////////////////// … … 4051 4195 xram_rsp_to_cc_send_fifo_srcid = r_xram_rsp_victim_copy.read(); 4052 4196 xram_rsp_to_cc_send_fifo_inst = 
r_xram_rsp_victim_copy_inst.read(); 4053 #if L1_MULTI_CACHE4054 xram_rsp_to_cc_send_fifo_cache_id = r_xram_rsp_victim_copy_cache.read();4055 #endif4056 4197 xram_rsp_to_cc_send_fifo_put = multi_req; 4057 r_xram_rsp_next_ptr 4198 r_xram_rsp_next_ptr = r_xram_rsp_victim_ptr.read(); 4058 4199 4059 4200 if(r_xram_rsp_victim_dirty) r_xram_rsp_fsm = XRAM_RSP_WRITE_DIRTY; … … 4065 4206 std::cout << " <MEMC " << name() << " XRAM_RSP_INVAL>" 4066 4207 << " Send an inval request to CC_SEND FSM" 4067 << " / victim line = " << r_xram_rsp_victim_nline.read()<< std::endl;4208 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4068 4209 #endif 4069 4210 } … … 4073 4214 case XRAM_RSP_WRITE_DIRTY: // send a write request to IXR_CMD FSM 4074 4215 { 4075 if(!r_xram_rsp_to_ixr_cmd_req.read()) 4076 { 4077 r_xram_rsp_to_ixr_cmd_req = true; 4078 r_xram_rsp_to_ixr_cmd_nline = r_xram_rsp_victim_nline.read(); 4079 r_xram_rsp_to_ixr_cmd_trdid = r_xram_rsp_trt_index.read(); 4080 for(size_t i=0; i<m_words ; i++) 4081 { 4082 r_xram_rsp_to_ixr_cmd_data[i] = r_xram_rsp_victim_data[i]; 4083 } 4084 m_cpt_write_dirty++; 4085 4086 bool multi_req = !r_xram_rsp_victim_is_cnt.read() and r_xram_rsp_victim_inval.read(); 4087 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4088 4089 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4090 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4216 if ( not r_xram_rsp_to_ixr_cmd_req.read() ) 4217 { 4218 r_xram_rsp_to_ixr_cmd_req = true; 4219 r_xram_rsp_to_ixr_cmd_index = r_xram_rsp_trt_index.read(); 4220 4221 m_cpt_write_dirty++; 4222 4223 bool multi_req = not r_xram_rsp_victim_is_cnt.read() and 4224 r_xram_rsp_victim_inval.read(); 4225 bool not_last_multi_req = multi_req and (r_xram_rsp_victim_count.read() != 1); 4226 4227 if(not_last_multi_req) r_xram_rsp_fsm = XRAM_RSP_HEAP_REQ; 4228 else r_xram_rsp_fsm = XRAM_RSP_IDLE; 4091 4229 4092 4230 #if DEBUG_MEMC_XRAM_RSP … … 4094 4232 std::cout << " <MEMC " << name() << " XRAM_RSP_WRITE_DIRTY>" 4095 4233 << " Send the put request to IXR_CMD FSM" 4096 << " / victim line = " << r_xram_rsp_victim_nline.read()<< std::endl;4097 #endif 4098 }4099 break;4234 << " / address = " << r_xram_rsp_victim_nline.read()*m_words*4 << std::endl; 4235 #endif 4236 } 4237 break; 4100 4238 } 4101 4239 ///////////////////////// 4102 4240 case XRAM_RSP_HEAP_REQ: // Get the lock to the HEAP 4103 4241 { 4104 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP)4105 {4106 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE;4107 }4242 if(r_alloc_heap_fsm.read() == ALLOC_HEAP_XRAM_RSP) 4243 { 4244 r_xram_rsp_fsm = XRAM_RSP_HEAP_ERASE; 4245 } 4108 4246 4109 4247 #if DEBUG_MEMC_XRAM_RSP … … 4112 4250 << " Requesting HEAP lock" << std::endl; 4113 4251 #endif 4114 break;4252 break; 4115 4253 } 4116 4254 ///////////////////////// … … 4122 4260 4123 4261 xram_rsp_to_cc_send_fifo_srcid = entry.owner.srcid; 4124 #if L1_MULTI_CACHE4125 xram_rsp_to_cc_send_fifo_cache_id = entry.owner.cache_id;4126 #endif4127 4262 xram_rsp_to_cc_send_fifo_inst = entry.owner.inst; 4128 4263 xram_rsp_to_cc_send_fifo_put = true; … … 4168 4303 HeapEntry last_entry; 4169 4304 last_entry.owner.srcid = 0; 4170 #if L1_MULTI_CACHE4171 last_entry.owner.cache_id = 0;4172 #endif4173 4305 last_entry.owner.inst = false; 4174 4306 if(m_heap.is_full()) … … 4194 4326 break; 4195 4327 } 4196 // 4328 ////////////////////////// 4197 4329 case XRAM_RSP_ERROR_ERASE: // erase TRT entry in case of error 4198 4330 { … … 4247 4379 
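The XRAM_RSP_HEAP_REQ / XRAM_RSP_HEAP_ERASE states above forward the remaining copies of the victim line one per cycle to the CC_SEND FIFO, following the next pointer of each HEAP entry. Stripped of the FSM machinery this is a plain linked-list walk, sketched below; SharerEntry and the "last entry points to itself" end-of-list convention are assumptions made for the sketch, not the actual HeapEntry layout.

    #include <cstddef>
    #include <vector>

    // Simplified stand-in for one sharer recorded in the HEAP
    // (one entry per extra L1 copy of a cache line).
    struct SharerEntry
    {
        std::size_t srcid;   // owner SRCID
        bool        inst;    // instruction / data copy
        std::size_t next;    // index of the next sharer (assumed self-referencing at the end)
    };

    // Collect every owner SRCID of the list starting at 'head', the way
    // XRAM_RSP_HEAP_ERASE feeds one owner per cycle to the CC_SEND FIFO.
    std::vector<std::size_t> collect_sharers(const std::vector<SharerEntry>& heap, std::size_t head)
    {
        std::vector<std::size_t> owners;
        std::size_t ptr = head;
        while (true)
        {
            owners.push_back(heap[ptr].srcid);
            if (heap[ptr].next == ptr) break;   // assumed end-of-list marker
            ptr = heap[ptr].next;
        }
        return owners;
    }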
//////////////////////////////////////////////////////////////////////////////////// 4248 4380 4381 //std::cout << std::endl << "cleanup_fsm" << std::endl; 4382 4249 4383 switch(r_cleanup_fsm.read()) 4250 4384 { 4251 ////////////////// 4252 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4253 { 4254 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4255 4256 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4257 4258 uint32_t srcid = 4259 DspinDhccpParam::dspin_get( 4260 flit, 4261 DspinDhccpParam::CLEANUP_SRCID); 4262 4263 uint8_t type = 4264 DspinDhccpParam::dspin_get( 4265 flit, 4266 DspinDhccpParam::P2M_TYPE); 4267 4268 r_cleanup_way_index = 4269 DspinDhccpParam::dspin_get( 4270 flit, 4271 DspinDhccpParam::CLEANUP_WAY_INDEX); 4272 4273 r_cleanup_nline = 4274 DspinDhccpParam::dspin_get( 4275 flit, 4276 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4277 4278 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4279 r_cleanup_srcid = srcid; 4280 4281 if(srcid >= m_initiators) 4282 { 4283 std::cout 4284 << "VCI_MEM_CACHE ERROR " << name() 4285 << " CLEANUP_IDLE state" << std::endl 4286 << "illegal srcid for cleanup request" << std::endl; 4287 4288 exit(0); 4289 } 4290 4291 m_cpt_cleanup++; 4292 cc_receive_to_cleanup_fifo_get = true; 4293 r_cleanup_fsm = CLEANUP_GET_NLINE; 4385 ////////////////// 4386 case CLEANUP_IDLE: // Get first DSPIN flit of the CLEANUP command 4387 { 4388 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4389 4390 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4391 4392 uint32_t srcid = DspinDhccpParam::dspin_get( flit, 4393 DspinDhccpParam::CLEANUP_SRCID); 4394 4395 uint8_t type = DspinDhccpParam::dspin_get( flit, 4396 DspinDhccpParam::P2M_TYPE); 4397 4398 r_cleanup_way_index = DspinDhccpParam::dspin_get( flit, 4399 DspinDhccpParam::CLEANUP_WAY_INDEX); 4400 4401 r_cleanup_nline = DspinDhccpParam::dspin_get( flit, 4402 DspinDhccpParam::CLEANUP_NLINE_MSB) << 32; 4403 4404 r_cleanup_inst = (type == DspinDhccpParam::TYPE_CLEANUP_INST); 4405 r_cleanup_srcid = srcid; 4406 4407 assert( (srcid < m_initiators) and 4408 "MEMC ERROR in CLEANUP_IDLE state : illegal SRCID value"); 4409 4410 m_cpt_cleanup++; 4411 cc_receive_to_cleanup_fifo_get = true; 4412 r_cleanup_fsm = CLEANUP_GET_NLINE; 4294 4413 4295 4414 #if DEBUG_MEMC_CLEANUP … … 4297 4416 std::cout << " <MEMC " << name() 4298 4417 << " CLEANUP_IDLE> Cleanup request:" << std::hex 4299 << " /owner_id = " << srcid4418 << " owner_id = " << srcid 4300 4419 << " / owner_ins = " << (type == DspinDhccpParam::TYPE_CLEANUP_INST) << std::endl; 4301 4420 #endif 4302 break; 4303 } 4304 4305 /////////////////////// 4306 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4307 { 4308 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4309 4310 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4311 4312 addr_t nline = r_cleanup_nline.read() | 4313 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4314 4315 cc_receive_to_cleanup_fifo_get = true; 4316 r_cleanup_nline = nline; 4317 r_cleanup_fsm = CLEANUP_DIR_REQ; 4421 break; 4422 } 4423 /////////////////////// 4424 case CLEANUP_GET_NLINE: // GET second DSPIN flit of the cleanup command 4425 { 4426 if(not m_cc_receive_to_cleanup_fifo.rok()) break; 4427 4428 uint64_t flit = m_cc_receive_to_cleanup_fifo.read(); 4429 4430 addr_t nline = r_cleanup_nline.read() | 4431 DspinDhccpParam::dspin_get(flit, DspinDhccpParam::CLEANUP_NLINE_LSB); 4432 4433 cc_receive_to_cleanup_fifo_get = true; 4434 r_cleanup_nline = nline; 4435 
r_cleanup_fsm = CLEANUP_DIR_REQ; 4318 4436 4319 4437 #if DEBUG_MEMC_CLEANUP … … 4321 4439 std::cout << " <MEMC " << name() 4322 4440 << " CLEANUP_GET_NLINE> Cleanup request:" 4323 << " / address = " << std::hex << nline * m_words * 4 << std::endl; 4324 #endif 4325 break; 4326 } 4327 4328 ///////////////////// 4329 case CLEANUP_DIR_REQ: // Get the lock to the directory 4330 { 4331 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4332 4333 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4441 << " address = " << std::hex << nline * m_words * 4 << std::endl; 4442 #endif 4443 break; 4444 } 4445 ///////////////////// 4446 case CLEANUP_DIR_REQ: // Get the lock to the directory 4447 { 4448 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) break; 4449 4450 r_cleanup_fsm = CLEANUP_DIR_LOCK; 4334 4451 4335 4452 #if DEBUG_MEMC_CLEANUP … … 4337 4454 std::cout << " <MEMC " << name() << " CLEANUP_DIR_REQ> Requesting DIR lock" << std::endl; 4338 4455 #endif 4339 break; 4340 } 4341 4342 ////////////////////// 4343 case CLEANUP_DIR_LOCK: 4344 { 4345 // test directory status 4346 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 4347 { 4348 std::cout 4349 << "VCI_MEM_CACHE ERROR " << name() 4350 << " CLEANUP_DIR_LOCK state" 4351 << " bad DIR allocation" << std::endl; 4352 4353 exit(0); 4354 } 4355 4356 // Read the directory 4357 size_t way = 0; 4358 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 4359 4360 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 4361 r_cleanup_is_cnt = entry.is_cnt; 4362 r_cleanup_dirty = entry.dirty; 4363 r_cleanup_tag = entry.tag; 4364 r_cleanup_lock = entry.lock; 4365 r_cleanup_way = way; 4366 r_cleanup_count = entry.count; 4367 r_cleanup_ptr = entry.ptr; 4368 r_cleanup_copy = entry.owner.srcid; 4369 r_cleanup_copy_inst = entry.owner.inst; 4370 #if L1_MULTI_CACHE 4371 r_cleanup_copy_cache = entry.owner.cache_id; 4372 #endif 4373 4374 if(entry.valid) // hit : the copy must be cleared 4375 { 4376 assert( 4377 (entry.count > 0) and 4378 "VCI MEM CACHE ERROR: " 4379 "In CLEANUP_DIR_LOCK, CLEANUP command on a valid entry " 4380 "with no copies"); 4381 4382 // no access to the heap 4383 if((entry.count == 1) or (entry.is_cnt)) 4384 { 4385 r_cleanup_fsm = CLEANUP_DIR_WRITE; 4386 } 4387 // access to the heap 4388 else 4389 { 4390 r_cleanup_fsm = CLEANUP_HEAP_REQ; 4391 } 4392 } 4393 else // miss : check UPT for a pending invalidation transaction 4394 { 4395 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4396 } 4456 break; 4457 } 4458 ////////////////////// 4459 case CLEANUP_DIR_LOCK: // test directory status 4460 { 4461 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4462 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4463 4464 // Read the directory 4465 size_t way = 0; 4466 addr_t cleanup_address = r_cleanup_nline.read() * m_words * 4; 4467 DirectoryEntry entry = m_cache_directory.read(cleanup_address , way); 4468 r_cleanup_is_cnt = entry.is_cnt; 4469 r_cleanup_dirty = entry.dirty; 4470 r_cleanup_tag = entry.tag; 4471 r_cleanup_lock = entry.lock; 4472 r_cleanup_way = way; 4473 r_cleanup_count = entry.count; 4474 r_cleanup_ptr = entry.ptr; 4475 r_cleanup_copy = entry.owner.srcid; 4476 r_cleanup_copy_inst = entry.owner.inst; 4477 4478 if(entry.valid) // hit : the copy must be cleared 4479 { 4480 assert( (entry.count > 0) and 4481 "MEMC ERROR in CLEANUP_DIR_LOCK state, CLEANUP on valid entry with no copies"); 4482 4483 if((entry.count == 1) or (entry.is_cnt)) // no access to the heap 4484 { 4485 r_cleanup_fsm = CLEANUP_DIR_WRITE; 4486 } 4487 else // access to 
the heap 4488 { 4489 r_cleanup_fsm = CLEANUP_HEAP_REQ; 4490 } 4491 } 4492 else // miss : check IVT for a pending inval 4493 { 4494 r_cleanup_fsm = CLEANUP_IVT_LOCK; 4495 } 4397 4496 4398 4497 #if DEBUG_MEMC_CLEANUP 4399 if(m_debug) 4400 { 4401 std::cout 4402 << " <MEMC " << name() 4403 << " CLEANUP_DIR_LOCK> Test directory status: " 4404 << std::hex 4405 << " line = " << cleanup_address 4406 << " / hit = " << entry.valid 4407 << " / dir_id = " << entry.owner.srcid 4408 << " / dir_ins = " << entry.owner.inst 4409 << " / search_id = " << r_cleanup_srcid.read() 4410 << " / search_ins = " << r_cleanup_inst.read() 4411 << " / count = " << entry.count 4412 << " / is_cnt = " << entry.is_cnt 4413 << std::endl; 4414 } 4415 #endif 4416 break; 4417 } 4418 4419 /////////////////////// 4420 case CLEANUP_DIR_WRITE: 4421 { 4422 // Update the directory entry without heap access 4423 if(r_alloc_dir_fsm.read() != ALLOC_DIR_CLEANUP) 4424 { 4425 std::cout 4426 << "VCI_MEM_CACHE ERROR " << name() 4427 << " CLEANUP_DIR_WRITE state" 4428 << " bad DIR allocation" << std::endl; 4429 4430 exit(0); 4431 } 4432 4433 size_t way = r_cleanup_way.read(); 4434 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 4435 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4436 4437 #if L1_MULTI_CACHE 4438 match_srcid &= (r_cleanup_copy_cache.read() == r_cleanup_pktid.read()); 4439 #endif 4440 4441 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4442 bool match = match_srcid and match_inst; 4443 4444 if(not r_cleanup_is_cnt.read() and not match) 4445 { 4446 std::cout 4447 << "VCI_MEM_CACHE ERROR : Cleanup request on a valid" 4448 << "entry using linked list mode with no corresponding" 4449 << "directory or heap entry" 4450 << std::endl; 4451 4452 exit(1); 4453 } 4454 4455 // update the cache directory (for the copies) 4456 DirectoryEntry entry; 4457 entry.valid = true; 4458 entry.is_cnt = r_cleanup_is_cnt.read(); 4459 entry.dirty = r_cleanup_dirty.read(); 4460 entry.tag = r_cleanup_tag.read(); 4461 entry.lock = r_cleanup_lock.read(); 4462 entry.ptr = r_cleanup_ptr.read(); 4463 entry.count = r_cleanup_count.read() - 1; 4464 entry.owner.srcid = 0; 4465 entry.owner.inst = 0; 4466 4467 #if L1_MULTI_CACHE 4468 entry.owner.cache_id = 0; 4469 #endif 4470 4471 m_cache_directory.write(set, way, entry); 4472 4473 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4498 if(m_debug) 4499 std::cout << " <MEMC " << name() 4500 << " CLEANUP_DIR_LOCK> Test directory status: " 4501 << std::hex << " address = " << cleanup_address 4502 << " / hit = " << entry.valid 4503 << " / dir_id = " << entry.owner.srcid 4504 << " / dir_ins = " << entry.owner.inst 4505 << " / search_id = " << r_cleanup_srcid.read() 4506 << " / search_ins = " << r_cleanup_inst.read() 4507 << " / count = " << entry.count 4508 << " / is_cnt = " << entry.is_cnt << std::endl; 4509 #endif 4510 break; 4511 } 4512 /////////////////////// 4513 case CLEANUP_DIR_WRITE: // Update the directory entry without heap access 4514 { 4515 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CLEANUP) and 4516 "MEMC ERROR in CLEANUP_DIR_LOCK: bad DIR allocation"); 4517 4518 size_t way = r_cleanup_way.read(); 4519 size_t set = m_y[(addr_t)(r_cleanup_nline.read()*m_words*4)]; 4520 bool match_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4521 bool match_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4522 bool match = match_srcid and match_inst; 4523 4524 assert( (r_cleanup_is_cnt.read() or match) and 4525 "MEMC ERROR in CLEANUP_DIR_LOCK: 
illegal CLEANUP on valid entry"); 4526 4527 // update the cache directory (for the copies) 4528 DirectoryEntry entry; 4529 entry.valid = true; 4530 entry.is_cnt = r_cleanup_is_cnt.read(); 4531 entry.dirty = r_cleanup_dirty.read(); 4532 entry.tag = r_cleanup_tag.read(); 4533 entry.lock = r_cleanup_lock.read(); 4534 entry.ptr = r_cleanup_ptr.read(); 4535 entry.count = r_cleanup_count.read() - 1; 4536 entry.owner.srcid = 0; 4537 entry.owner.inst = 0; 4538 4539 m_cache_directory.write(set, way, entry); 4540 4541 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4474 4542 4475 4543 #if DEBUG_MEMC_CLEANUP 4476 if(m_debug) 4477 { 4478 std::cout 4479 << " <MEMC " << name() 4480 << " CLEANUP_DIR_WRITE> Update directory:" 4481 << std::hex 4482 << " address = " << r_cleanup_nline.read() * m_words * 4 4483 << " / dir_id = " << entry.owner.srcid 4484 << " / dir_ins = " << entry.owner.inst 4485 << " / count = " << entry.count 4486 << " / is_cnt = " << entry.is_cnt 4487 << std::endl; 4488 } 4489 #endif 4490 4491 break; 4492 } 4493 4494 ////////////////////// 4495 case CLEANUP_HEAP_REQ: 4496 { 4497 // get the lock to the HEAP directory 4498 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 4499 4500 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 4544 if(m_debug) 4545 std::cout << " <MEMC " << name() 4546 << " CLEANUP_DIR_WRITE> Update directory:" 4547 << std::hex << " address = " << r_cleanup_nline.read() * m_words * 4 4548 << " / dir_id = " << entry.owner.srcid 4549 << " / dir_ins = " << entry.owner.inst 4550 << " / count = " << entry.count 4551 << " / is_cnt = " << entry.is_cnt << std::endl; 4552 #endif 4553 4554 break; 4555 } 4556 ////////////////////// 4557 case CLEANUP_HEAP_REQ: // get the lock to the HEAP directory 4558 { 4559 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) break; 4560 4561 r_cleanup_fsm = CLEANUP_HEAP_LOCK; 4501 4562 4502 4563 #if DEBUG_MEMC_CLEANUP 4503 if(m_debug) 4504 { 4505 std::cout 4506 << " <MEMC " << name() 4507 << " CLEANUP_HEAP_REQ> HEAP lock acquired " 4508 << std::endl; 4509 } 4510 #endif 4511 break; 4512 } 4513 4514 ////////////////////// 4515 case CLEANUP_HEAP_LOCK: 4516 { 4517 // two cases are handled in this state : 4518 // 1. the matching copy is directly in the directory 4519 // 2. 
the matching copy is the first copy in the heap 4520 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) 4521 { 4522 std::cout 4523 << "VCI_MEM_CACHE ERROR " << name() 4524 << " CLEANUP_HEAP_LOCK state" 4525 << " bad HEAP allocation" << std::endl; 4526 4527 exit(0); 4528 } 4529 4530 size_t way = r_cleanup_way.read(); 4531 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 4532 4533 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 4534 bool last = (heap_entry.next == r_cleanup_ptr.read()); 4535 4536 // match_dir computation 4537 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4538 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4539 bool match_dir = match_dir_srcid and match_dir_inst; 4540 4541 // match_heap computation 4542 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4543 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4544 bool match_heap = match_heap_srcid and match_heap_inst; 4545 4546 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 4547 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4548 r_cleanup_prev_inst = heap_entry.owner.inst; 4549 4550 #if L1_MULTI_CACHE 4551 match_dir = match_dir and(r_cleanup_copy_cache.read() == r_cleanup_pktid.read()); 4552 match_heap = match_heap and(heap_entry.owner.cache_id == r_cleanup_pktid.read()); 4553 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 4554 #endif 4555 4556 if(not match_dir and not match_heap and last) 4557 { 4558 std::cout 4559 << "VCI_MEM_CACHE ERROR " << name() 4560 << " CLEANUP_HEAP_LOCK state" 4561 << " hit but copy not found" 4562 << std::endl; 4563 /**/ 4564 std::cout 4565 << "r_cleanup_srcid = " << r_cleanup_srcid.read() 4566 << " / r_cleanup_inst = " << r_cleanup_inst.read() << std::endl 4567 << "r_cleanup_copy = " << r_cleanup_copy.read() 4568 << " / r_cleanup_copy_inst = " << r_cleanup_copy_inst.read() << std::endl 4569 << "heap_entry.owner.srcid = " << heap_entry.owner.srcid 4570 << " / heap_entry.owner.inst = " << heap_entry.owner.inst << std::endl; 4571 /**/ 4572 exit(0); 4573 } 4574 4575 if(match_dir and match_heap) 4576 { 4577 std::cout 4578 << "VCI_MEM_CACHE ERROR " << name() 4579 << " CLEANUP_HEAP_LOCK state" 4580 << " two copies matching the cleanup owner id" 4581 << std::endl; 4582 /**/ 4583 std::cout 4584 << "r_cleanup_srcid = " << r_cleanup_srcid.read() 4585 << " / r_cleanup_inst = " << r_cleanup_inst.read() << std::endl 4586 << "r_cleanup_copy = " << r_cleanup_copy.read() 4587 << " / r_cleanup_copy_inst = " << r_cleanup_copy_inst.read() << std::endl 4588 << "heap_entry.owner.srcid = " << heap_entry.owner.srcid 4589 << " / heap_entry.owner.inst = " << heap_entry.owner.inst << std::endl; 4590 /**/ 4591 4592 exit(0); 4593 } 4594 4595 DirectoryEntry dir_entry; 4596 dir_entry.valid = true; 4597 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 4598 dir_entry.dirty = r_cleanup_dirty.read(); 4599 dir_entry.tag = r_cleanup_tag.read(); 4600 dir_entry.lock = r_cleanup_lock.read(); 4601 dir_entry.count = r_cleanup_count.read()-1; 4602 4603 // the matching copy is registered in the directory and 4604 // it must be replaced by the first copy registered in 4605 // the heap. 
The corresponding entry must be freed 4606 if(match_dir) 4607 { 4608 dir_entry.ptr = heap_entry.next; 4609 dir_entry.owner.srcid = heap_entry.owner.srcid; 4610 dir_entry.owner.inst = heap_entry.owner.inst; 4611 4612 #if L1_MULTI_CACHE 4613 dir_entry.owner.cache_id = heap_entry.owner.cache_id; 4614 #endif 4615 4616 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4617 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4618 } 4619 4620 // the matching copy is the first copy in the heap 4621 // It must be freed and the copy registered in directory 4622 // must point to the next copy in heap 4623 else if(match_heap) 4624 { 4625 dir_entry.ptr = heap_entry.next; 4626 dir_entry.owner.srcid = r_cleanup_copy.read(); 4627 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4628 4629 #if L1_MULTI_CACHE 4630 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 4631 #endif 4632 4633 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4634 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4635 } 4636 4637 // The matching copy is in the heap, but is not the first copy 4638 // The directory entry must be modified to decrement count 4639 else 4640 { 4641 dir_entry.ptr = r_cleanup_ptr.read(); 4642 dir_entry.owner.srcid = r_cleanup_copy.read(); 4643 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4644 4645 #if L1_MULTI_CACHE 4646 dir_entry.owner.cache_id = r_cleanup_copy_cache.read(); 4647 #endif 4648 4649 r_cleanup_next_ptr = heap_entry.next; 4650 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4651 } 4652 4653 m_cache_directory.write(set,way,dir_entry); 4564 if(m_debug) 4565 std::cout << " <MEMC " << name() 4566 << " CLEANUP_HEAP_REQ> HEAP lock acquired " << std::endl; 4567 #endif 4568 break; 4569 } 4570 ////////////////////// 4571 case CLEANUP_HEAP_LOCK: // two cases are handled in this state : 4572 // 1. the matching copy is directly in the directory 4573 // 2. 
the matching copy is the first copy in the heap 4574 { 4575 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4576 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4577 4578 size_t way = r_cleanup_way.read(); 4579 size_t set = m_y[(addr_t)(r_cleanup_nline.read() *m_words*4)]; 4580 4581 HeapEntry heap_entry = m_heap.read(r_cleanup_ptr.read()); 4582 bool last = (heap_entry.next == r_cleanup_ptr.read()); 4583 4584 // match_dir computation 4585 bool match_dir_srcid = (r_cleanup_copy.read() == r_cleanup_srcid.read()); 4586 bool match_dir_inst = (r_cleanup_copy_inst.read() == r_cleanup_inst.read()); 4587 bool match_dir = match_dir_srcid and match_dir_inst; 4588 4589 // match_heap computation 4590 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4591 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4592 bool match_heap = match_heap_srcid and match_heap_inst; 4593 4594 r_cleanup_prev_ptr = r_cleanup_ptr.read(); 4595 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4596 r_cleanup_prev_inst = heap_entry.owner.inst; 4597 4598 assert( (not last or match_dir or match_heap) and 4599 "MEMC ERROR in CLEANUP_HEAP_LOCK state: hit but no copy found"); 4600 4601 assert( (not match_dir or not match_heap) and 4602 "MEMC ERROR in CLEANUP_HEAP_LOCK state: two matching copies found"); 4603 4604 DirectoryEntry dir_entry; 4605 dir_entry.valid = true; 4606 dir_entry.is_cnt = r_cleanup_is_cnt.read(); 4607 dir_entry.dirty = r_cleanup_dirty.read(); 4608 dir_entry.tag = r_cleanup_tag.read(); 4609 dir_entry.lock = r_cleanup_lock.read(); 4610 dir_entry.count = r_cleanup_count.read()-1; 4611 4612 // the matching copy is registered in the directory and 4613 // it must be replaced by the first copy registered in 4614 // the heap. 
The corresponding entry must be freed 4615 if(match_dir) 4616 { 4617 dir_entry.ptr = heap_entry.next; 4618 dir_entry.owner.srcid = heap_entry.owner.srcid; 4619 dir_entry.owner.inst = heap_entry.owner.inst; 4620 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4621 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4622 } 4623 4624 // the matching copy is the first copy in the heap 4625 // It must be freed and the copy registered in directory 4626 // must point to the next copy in heap 4627 else if(match_heap) 4628 { 4629 dir_entry.ptr = heap_entry.next; 4630 dir_entry.owner.srcid = r_cleanup_copy.read(); 4631 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4632 r_cleanup_next_ptr = r_cleanup_ptr.read(); 4633 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4634 } 4635 4636 // The matching copy is in the heap, but is not the first copy 4637 // The directory entry must be modified to decrement count 4638 else 4639 { 4640 dir_entry.ptr = r_cleanup_ptr.read(); 4641 dir_entry.owner.srcid = r_cleanup_copy.read(); 4642 dir_entry.owner.inst = r_cleanup_copy_inst.read(); 4643 r_cleanup_next_ptr = heap_entry.next; 4644 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4645 } 4646 4647 m_cache_directory.write(set,way,dir_entry); 4654 4648 4655 4649 #if DEBUG_MEMC_CLEANUP 4656 if(m_debug) 4657 { 4658 std::cout 4659 << " <MEMC " << name() 4660 << " CLEANUP_HEAP_LOCK> Checks matching:" 4661 << " address = " << r_cleanup_nline.read() * m_words * 4 4662 << " / dir_id = " << r_cleanup_copy.read() 4663 << " / dir_ins = " << r_cleanup_copy_inst.read() 4664 << " / heap_id = " << heap_entry.owner.srcid 4665 << " / heap_ins = " << heap_entry.owner.inst 4666 << " / search_id = " << r_cleanup_srcid.read() 4667 << " / search_ins = " << r_cleanup_inst.read() 4668 << std::endl; 4669 } 4670 #endif 4671 break; 4672 } 4673 4674 //////////////////////// 4675 case CLEANUP_HEAP_SEARCH: 4676 { 4677 // This state is handling the case where the copy 4678 // is in the heap, but is not the first in the linked list 4679 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) 4680 { 4681 std::cout 4682 << "VCI_MEM_CACHE ERROR " << name() 4683 << " CLEANUP_HEAP_SEARCH state" 4684 << " bad HEAP allocation" << std::endl; 4685 4686 exit(0); 4687 } 4688 4689 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 4690 4691 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 4692 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4693 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4694 bool match_heap = match_heap_srcid and match_heap_inst; 4695 4696 #if L1_MULTI_CACHE 4697 match_heap = match_heap and(heap_entry.owner.cache_id == r_cleanup_pktid.read()); 4698 #endif 4699 4700 if(not match_heap and last) 4701 { 4702 std::cout 4703 << "VCI_MEM_CACHE_ERROR " << name() 4704 << " CLEANUP_HEAP_SEARCH state" 4705 << " cleanup on valid line but copy not found" 4706 << std::endl; 4707 4708 exit(0); 4709 } 4710 4711 // the matching copy must be removed 4712 if(match_heap) 4713 { 4714 // re-use ressources 4715 r_cleanup_ptr = heap_entry.next; 4716 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 4717 } 4718 // test the next in the linked list 4719 else 4720 { 4721 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 4722 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4723 r_cleanup_prev_inst = heap_entry.owner.inst; 4724 r_cleanup_next_ptr = heap_entry.next; 4725 4726 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4727 4728 #if L1_MULTI_CACHE 4729 r_cleanup_prev_cache_id = heap_entry.owner.cache_id; 4730 #endif 4731 } 4650 if(m_debug) 4651 std::cout << " 
<MEMC " << name() 4652 << " CLEANUP_HEAP_LOCK> Checks matching:" 4653 << " address = " << r_cleanup_nline.read() * m_words * 4 4654 << " / dir_id = " << r_cleanup_copy.read() 4655 << " / dir_ins = " << r_cleanup_copy_inst.read() 4656 << " / heap_id = " << heap_entry.owner.srcid 4657 << " / heap_ins = " << heap_entry.owner.inst 4658 << " / search_id = " << r_cleanup_srcid.read() 4659 << " / search_ins = " << r_cleanup_inst.read() << std::endl; 4660 #endif 4661 break; 4662 } 4663 //////////////////////// 4664 case CLEANUP_HEAP_SEARCH: // This state is handling the case where the copy 4665 // is in the heap, but not the first in linked list 4666 { 4667 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4668 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4669 4670 HeapEntry heap_entry = m_heap.read(r_cleanup_next_ptr.read()); 4671 4672 bool last = (heap_entry.next == r_cleanup_next_ptr.read()); 4673 bool match_heap_srcid = (heap_entry.owner.srcid == r_cleanup_srcid.read()); 4674 bool match_heap_inst = (heap_entry.owner.inst == r_cleanup_inst.read()); 4675 bool match_heap = match_heap_srcid and match_heap_inst; 4676 4677 assert( (not last or match_heap) and 4678 "MEMC ERROR in CLEANUP_HEAP_SEARCH state: no copy found"); 4679 4680 // the matching copy must be removed 4681 if(match_heap) 4682 { 4683 // re-use ressources 4684 r_cleanup_ptr = heap_entry.next; 4685 r_cleanup_fsm = CLEANUP_HEAP_CLEAN; 4686 } 4687 // test the next in the linked list 4688 else 4689 { 4690 r_cleanup_prev_ptr = r_cleanup_next_ptr.read(); 4691 r_cleanup_prev_srcid = heap_entry.owner.srcid; 4692 r_cleanup_prev_inst = heap_entry.owner.inst; 4693 r_cleanup_next_ptr = heap_entry.next; 4694 r_cleanup_fsm = CLEANUP_HEAP_SEARCH; 4695 } 4732 4696 4733 4697 #if DEBUG_MEMC_CLEANUP 4734 4735 4698 if(m_debug) 4699 { 4736 4700 if(not match_heap) 4737 4701 { … … 4748 4712 << std::endl; 4749 4713 } 4750 4751 4714 std::cout 4752 4715 << " address = " << r_cleanup_nline.read() * m_words * 4 … … 4757 4720 << " / last = " << last 4758 4721 << std::endl; 4759 } 4760 #endif 4761 break; 4762 } 4763 //////////////////////// 4764 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 4765 { 4766 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) 4767 { 4768 std::cout 4769 << "VCI_MEM_CACHE ERROR " << name() 4770 << " CLEANUP_HEAP_CLEAN state" 4771 << "Bad HEAP allocation" << std::endl; 4772 4773 exit(0); 4774 } 4775 4776 HeapEntry heap_entry; 4777 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 4778 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 4779 4780 #if L1_MULTI_CACHE 4781 heap_entry.owner.cache_id = r_cleanup_prev_cache_id.read(); 4782 #endif 4783 4784 bool last = (r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 4785 4786 // this is the last entry of the list of copies 4787 if(last) 4788 { 4789 heap_entry.next = r_cleanup_prev_ptr.read(); 4790 } 4791 // this is not the last entry 4792 else 4793 { 4794 heap_entry.next = r_cleanup_ptr.read(); 4795 } 4796 4797 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 4798 4799 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4722 } 4723 #endif 4724 break; 4725 } 4726 //////////////////////// 4727 case CLEANUP_HEAP_CLEAN: // remove a copy in the linked list 4728 { 4729 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4730 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4731 4732 HeapEntry heap_entry; 4733 heap_entry.owner.srcid = r_cleanup_prev_srcid.read(); 4734 heap_entry.owner.inst = r_cleanup_prev_inst.read(); 4735 bool last = 
(r_cleanup_next_ptr.read() == r_cleanup_ptr.read()); 4736 4737 if (last) // this is the last entry of the list of copies 4738 { 4739 heap_entry.next = r_cleanup_prev_ptr.read(); 4740 } 4741 else // this is not the last entry 4742 { 4743 heap_entry.next = r_cleanup_ptr.read(); 4744 } 4745 4746 m_heap.write(r_cleanup_prev_ptr.read(), heap_entry); 4747 4748 r_cleanup_fsm = CLEANUP_HEAP_FREE; 4800 4749 4801 4750 #if DEBUG_MEMC_CLEANUP … … 4804 4753 << " Remove the copy in the linked list" << std::endl; 4805 4754 #endif 4806 break; 4807 } 4808 /////////////////////// 4809 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 4810 // and becomes the head of the list of free entries 4811 { 4812 if(r_alloc_heap_fsm.read() != ALLOC_HEAP_CLEANUP) 4813 { 4814 std::cout 4815 << "VCI_MEM_CACHE ERROR " << name() 4816 << " CLEANUP_HEAP_CLEAN state" << std::endl 4817 << "Bad HEAP allocation" << std::endl; 4818 4819 exit(0); 4820 } 4821 4822 HeapEntry heap_entry; 4823 heap_entry.owner.srcid = 0; 4824 heap_entry.owner.inst = false; 4825 4826 #if L1_MULTI_CACHE 4827 heap_entry.owner.cache_id = 0; 4828 #endif 4829 4830 if(m_heap.is_full()) 4831 { 4832 heap_entry.next = r_cleanup_next_ptr.read(); 4833 } 4834 else 4835 { 4836 heap_entry.next = m_heap.next_free_ptr(); 4837 } 4838 4839 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 4840 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 4841 m_heap.unset_full(); 4842 4843 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4755 break; 4756 } 4757 /////////////////////// 4758 case CLEANUP_HEAP_FREE: // The heap entry pointed by r_cleanup_next_ptr is freed 4759 // and becomes the head of the list of free entries 4760 { 4761 assert( (r_alloc_heap_fsm.read() == ALLOC_HEAP_CLEANUP) and 4762 "MEMC ERROR in CLEANUP_HEAP_LOCK state: bad HEAP allocation"); 4763 4764 HeapEntry heap_entry; 4765 heap_entry.owner.srcid = 0; 4766 heap_entry.owner.inst = false; 4767 4768 if(m_heap.is_full()) 4769 { 4770 heap_entry.next = r_cleanup_next_ptr.read(); 4771 } 4772 else 4773 { 4774 heap_entry.next = m_heap.next_free_ptr(); 4775 } 4776 4777 m_heap.write(r_cleanup_next_ptr.read(),heap_entry); 4778 m_heap.write_free_ptr(r_cleanup_next_ptr.read()); 4779 m_heap.unset_full(); 4780 4781 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4844 4782 4845 4783 #if DEBUG_MEMC_CLEANUP … … 4848 4786 << " Update the list of free entries" << std::endl; 4849 4787 #endif 4788 break; 4789 } 4790 ////////////////////// 4791 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 4792 // invalidate transaction matching the cleanup 4793 { 4794 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 4795 4796 size_t index = 0; 4797 bool match_inval; 4798 4799 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 4800 4801 if ( not match_inval ) // no pending inval in IVT 4802 { 4803 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4804 4805 #if DEBUG_MEMC_CLEANUP 4806 if(m_debug) 4807 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 4808 << " Unexpected cleanup with no corresponding IVT entry:" 4809 << " address = " << std::hex << (r_cleanup_nline.read()*4*m_words) << std::endl; 4810 #endif 4811 } 4812 else // pending inval in IVT 4813 { 4814 r_cleanup_write_srcid = m_ivt.srcid(index); 4815 r_cleanup_write_trdid = m_ivt.trdid(index); 4816 r_cleanup_write_pktid = m_ivt.pktid(index); 4817 r_cleanup_need_rsp = m_ivt.need_rsp(index); 4818 r_cleanup_need_ack = m_ivt.need_ack(index); 4819 r_cleanup_index = index; 4820 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 4821 4822 #if 
DEBUG_MEMC_CLEANUP 4823 if(m_debug) 4824 std::cout << " <MEMC " << name() << " CLEANUP_IVT_LOCK>" 4825 << " Cleanup matching pending invalidate transaction on IVT:" 4826 << " address = " << std::hex << (r_cleanup_nline.read()*m_words*4) 4827 << " / ivt_entry = " << index << std::endl; 4828 #endif 4829 } 4850 4830 break; 4851 } 4852 ////////////////////// 4853 case CLEANUP_IVT_LOCK: // get the lock protecting the IVT to search a pending 4854 // invalidate transaction matching the cleanup 4855 { 4856 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) break; 4857 4858 size_t index = 0; 4859 bool match_inval; 4860 4861 match_inval = m_ivt.search_inval(r_cleanup_nline.read(), index); 4862 4863 if ( not match_inval ) // no pending inval 4864 { 4865 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4866 4867 #if DEBUG_MEMC_CLEANUP 4868 if(m_debug) 4869 std::cout << " <MEMC " << name() 4870 << " CLEANUP_IVT_LOCK> Unexpected cleanup" 4871 << " with no corresponding IVT entry:" 4872 << " address = " << std::hex 4873 << (r_cleanup_nline.read() *4*m_words) 4874 << std::endl; 4875 #endif 4876 break; 4877 } 4878 4879 // pending inval 4880 r_cleanup_write_srcid = m_ivt.srcid(index); 4881 r_cleanup_write_trdid = m_ivt.trdid(index); 4882 r_cleanup_write_pktid = m_ivt.pktid(index); 4883 r_cleanup_need_rsp = m_ivt.need_rsp(index); 4884 r_cleanup_need_ack = m_ivt.need_ack(index); 4885 r_cleanup_index = index; 4886 4887 r_cleanup_fsm = CLEANUP_IVT_DECREMENT; 4888 4889 #if DEBUG_MEMC_CLEANUP 4890 if(m_debug) 4891 std::cout << " <MEMC " << name() 4892 << " CLEANUP_IVT_LOCK> Cleanup matching pending" 4893 << " invalidate transaction on IVT:" 4894 << " address = " << std::hex << r_cleanup_nline.read() * m_words * 4 4895 << " / ivt_entry = " << index << std::endl; 4896 #endif 4897 break; 4898 } 4899 /////////////////////////// 4900 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 4901 { 4902 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP) 4903 { 4904 std::cout 4905 << "VCI_MEM_CACHE ERROR " << name() 4906 << " CLEANUP_IVT_DECREMENT state" << std::endl 4907 << "Bad IVT allocation" 4908 << std::endl; 4909 4910 exit(0); 4911 } 4912 4913 size_t count = 0; 4914 m_ivt.decrement(r_cleanup_index.read(), count); 4915 4916 if(count == 0) // multi inval transaction completed 4917 { 4918 r_cleanup_fsm = CLEANUP_IVT_CLEAR; 4919 } 4920 else // multi inval transaction not completed 4921 { 4922 r_cleanup_fsm = CLEANUP_SEND_CLACK ; 4923 } 4831 } 4832 /////////////////////////// 4833 case CLEANUP_IVT_DECREMENT: // decrement response counter in IVT matching entry 4834 // and test if last 4835 { 4836 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 4837 "MEMC ERROR in CLEANUP_IVT_DECREMENT state: Bad IVT allocation"); 4838 4839 size_t count = 0; 4840 m_ivt.decrement(r_cleanup_index.read(), count); 4841 4842 if(count == 0) r_cleanup_fsm = CLEANUP_IVT_CLEAR; 4843 else r_cleanup_fsm = CLEANUP_SEND_CLACK ; 4924 4844 4925 4845 #if DEBUG_MEMC_CLEANUP … … 4927 4847 std::cout << " <MEMC " << name() << " CLEANUP_IVT_DECREMENT>" 4928 4848 << " Decrement response counter in IVT:" 4929 4930 4931 #endif 4932 break;4933 }4934 ///////////////////////4935 case CLEANUP_IVT_CLEAR: // Clear IVT entry4936 {4937 if(r_alloc_ivt_fsm.read() != ALLOC_IVT_CLEANUP)4938 {4939 std::cout4940 << "VCI_MEM_CACHE ERROR " << name() 4941 << " CLEANUP_IVT_CLEAR state" << std::endl4942 << "Bad IVT allocation" 4943 << std::endl;4944 4945 exit(0);4946 }4947 4948 m_ivt.clear(r_cleanup_index.read());4949 4950 if ( r_cleanup_need_rsp.read() ) 
r_cleanup_fsm = CLEANUP_WRITE_RSP; 4951 else if ( r_cleanup_need_ack.read() ) r_cleanup_fsm = CLEANUP_CONFIG_ACK;4952 elser_cleanup_fsm = CLEANUP_SEND_CLACK;4849 << " IVT_index = " << r_cleanup_index.read() 4850 << " / rsp_count = " << count << std::endl; 4851 #endif 4852 break; 4853 } 4854 /////////////////////// 4855 case CLEANUP_IVT_CLEAR: // Clear IVT entry 4856 // Acknowledge CONFIG FSM if required 4857 { 4858 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CLEANUP) and 4859 "MEMC ERROR in CLEANUP_IVT_CLEAR state : bad IVT allocation"); 4860 4861 m_ivt.clear(r_cleanup_index.read()); 4862 4863 if ( r_cleanup_need_ack.read() ) 4864 { 4865 assert( (r_config_rsp_lines.read() > 0) and 4866 "MEMC ERROR in CLEANUP_IVT_CLEAR state"); 4867 4868 r_config_rsp_lines = r_config_rsp_lines.read() - 1; 4869 } 4870 4871 if ( r_cleanup_need_rsp.read() ) r_cleanup_fsm = CLEANUP_WRITE_RSP; 4872 else r_cleanup_fsm = CLEANUP_SEND_CLACK; 4953 4873 4954 4874 #if DEBUG_MEMC_CLEANUP … … 4958 4878 << " IVT_index = " << r_cleanup_index.read() << std::endl; 4959 4879 #endif 4960 break;4961 }4962 ///////////////////////4963 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network4880 break; 4881 } 4882 /////////////////////// 4883 case CLEANUP_WRITE_RSP: // response to a previous write on the direct network 4964 4884 // wait if pending request to the TGT_RSP FSM 4965 { 4966 if(r_cleanup_to_tgt_rsp_req.read()) break; 4967 4968 // no pending request 4969 r_cleanup_to_tgt_rsp_req = true; 4970 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 4971 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 4972 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 4973 4974 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4885 { 4886 if(r_cleanup_to_tgt_rsp_req.read()) break; 4887 4888 // no pending request 4889 r_cleanup_to_tgt_rsp_req = true; 4890 r_cleanup_to_tgt_rsp_srcid = r_cleanup_write_srcid.read(); 4891 r_cleanup_to_tgt_rsp_trdid = r_cleanup_write_trdid.read(); 4892 r_cleanup_to_tgt_rsp_pktid = r_cleanup_write_pktid.read(); 4893 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4975 4894 4976 4895 #if DEBUG_MEMC_CLEANUP … … 4982 4901 << " / rpktid = " << r_cleanup_write_pktid.read() << std::endl; 4983 4902 #endif 4984 break; 4985 } 4986 //////////////////////// 4987 case CLEANUP_CONFIG_ACK: // signals inval completion to CONFIG FSM 4988 // wait if pending request 4989 { 4990 if ( r_cleanup_to_config_ack.read() ) break; 4991 4992 r_cleanup_to_config_ack = true; 4993 r_cleanup_fsm = CLEANUP_SEND_CLACK; 4994 4995 #if DEBUG_MEMC_CLEANUP 4996 if(m_debug) 4997 std::cout << " <MEMC " << name() << " CLEANUP_CONFIG_ACK>" 4998 << " Acknowledge broacast inval completion" << std::endl; 4999 #endif 5000 break; 5001 } 5002 //////////////////////// 5003 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 5004 // on the coherence CLACK network. 5005 { 5006 if(not p_dspin_clack.read) break; 5007 5008 r_cleanup_fsm = CLEANUP_IDLE; 4903 break; 4904 } 4905 //////////////////////// 4906 case CLEANUP_SEND_CLACK: // acknowledgement to a cleanup command 4907 // on the coherence CLACK network. 
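The IVT handling above follows a simple counter protocol: each CLEANUP decrements the response counter of the matching invalidate entry, and the last one clears the entry and, when need_rsp is set, triggers the write response. A reduced sketch of that protocol, with the table shrunk to the fields used here (the entry layout is an assumption):

#include <vector>
#include <cassert>
#include <cstddef>

// one IVT entry, reduced to the fields exercised by the CLEANUP FSM
struct IvtEntry
{
    bool        valid;
    std::size_t count;      // number of CLEANUP responses still expected
    bool        need_rsp;   // a write response must be sent when count reaches 0
};

int main()
{
    std::vector<IvtEntry> ivt(4, IvtEntry{false, 0, false});
    ivt[1] = IvtEntry{true, 3, true};        // pending inval with 3 copies

    bool write_rsp_sent = false;
    for (int i = 0; i < 3; ++i)              // three CLEANUP packets arrive
    {
        IvtEntry &e = ivt[1];
        assert(e.valid);
        e.count = e.count - 1;               // CLEANUP_IVT_DECREMENT
        if (e.count == 0)                    // last expected response
        {
            if (e.need_rsp) write_rsp_sent = true;   // CLEANUP_WRITE_RSP
            e = IvtEntry{false, 0, false};           // CLEANUP_IVT_CLEAR
        }
    }
    assert(write_rsp_sent and not ivt[1].valid);
    return 0;
}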
4908 { 4909 if(not p_dspin_clack.read) break; 4910 4911 r_cleanup_fsm = CLEANUP_IDLE; 5009 4912 5010 4913 #if DEBUG_MEMC_CLEANUP … … 5012 4915 std::cout << " <MEMC " << name() 5013 4916 << " CLEANUP_SEND_CLACK> Send the response to a cleanup request:" 5014 << " nline = " << std::hex << r_cleanup_nline.read()4917 << " address = " << std::hex << r_cleanup_nline.read()*m_words*4 5015 4918 << " / way = " << std::dec << r_cleanup_way.read() 5016 4919 << " / srcid = " << std::dec << r_cleanup_srcid.read() 5017 4920 << std::endl; 5018 4921 #endif 5019 break;5020 }4922 break; 4923 } 5021 4924 } // end switch cleanup fsm 5022 4925 … … 5024 4927 // CAS FSM 5025 4928 //////////////////////////////////////////////////////////////////////////////////// 5026 // The CAS FSM handles the CAS (Store Conditionnal) atomic commands, 5027 // that are handled as "compare-and-swap instructions. 4929 // The CAS FSM handles the CAS (Compare And Swap) atomic commands. 5028 4930 // 5029 4931 // This command contains two or four flits: 5030 4932 // - In case of 32 bits atomic access, the first flit contains the value read 5031 // by a previous LLinstruction, the second flit contains the value to be writen.4933 // by a previous READ instruction, the second flit contains the value to be writen. 5032 4934 // - In case of 64 bits atomic access, the 2 first flits contains the value read 5033 // by a previous LLinstruction, the 2 next flits contains the value to be writen.4935 // by a previous READ instruction, the 2 next flits contains the value to be writen. 5034 4936 // 5035 4937 // The target address is cachable. If it is replicated in other L1 caches … … 5038 4940 // It access the directory to check hit / miss. 5039 4941 // - In case of miss, the CAS FSM must register a GET transaction in TRT. 5040 // If a read transaction to the XRAM for this line already exists,5041 // or if the transaction table is full, it goes to the WAIT state5042 // to release the locks and try again. When the GET transaction has been5043 // launched, it goes to the WAIT state and try again.5044 // The CAS request is not consumed in the FIFO until a HIT is obtained.4942 // If a read transaction to the XRAM for this line already exists, 4943 // or if the transaction table is full, it goes to the WAIT state 4944 // to release the locks and try again. When the GET transaction has been 4945 // launched, it goes to the WAIT state and try again. 4946 // The CAS request is not consumed in the FIFO until a HIT is obtained. 5045 4947 // - In case of hit... 
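Stripped of the directory and coherence machinery, the CAS command described above implements the usual compare-and-swap contract: the new value is written only if the line still holds the value previously read, and the response data (0 or 1) reports success or failure. A minimal functional sketch (the cas32 helper is illustrative, not part of the component):

#include <cstdint>
#include <cassert>

// returns 0 on success (CAS_RSP_SUCCESS) and 1 on failure (CAS_RSP_FAIL),
// i.e. the data value sent back to the processor through TGT_RSP
uint32_t cas32(uint32_t &memory_word, uint32_t expected, uint32_t desired)
{
    if (memory_word != expected) return 1;   // CAS_DIR_HIT_COMPARE: data changed
    memory_word = desired;                   // CAS_DIR_HIT_WRITE: update the copy
    return 0;
}

int main()
{
    uint32_t word = 42;
    assert(cas32(word, 42, 7) == 0 and word == 7);   // success path
    assert(cas32(word, 42, 9) == 1 and word == 7);   // failure path: 42 != 7
    return 0;
}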
5046 4948 /////////////////////////////////////////////////////////////////////////////////// 5047 4949 4950 //std::cout << std::endl << "cas_fsm" << std::endl; 4951 5048 4952 switch(r_cas_fsm.read()) 5049 4953 { 5050 /////////////4954 //////////// 5051 4955 case CAS_IDLE: // fill the local rdata buffers 5052 4956 { 5053 if(m_cmd_cas_addr_fifo.rok())5054 {4957 if (m_cmd_cas_addr_fifo.rok() ) 4958 { 5055 4959 5056 4960 #if DEBUG_MEMC_CAS 5057 if(m_debug) 5058 { 5059 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 5060 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 5061 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 5062 << " wdata = " << m_cmd_cas_wdata_fifo.read() 5063 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 5064 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 5065 } 5066 #endif 5067 if(m_cmd_cas_eop_fifo.read()) 5068 { 5069 m_cpt_cas++; 5070 r_cas_fsm = CAS_DIR_REQ; 5071 } 5072 else // we keep the last word in the FIFO 5073 { 5074 cmd_cas_fifo_get = true; 5075 } 5076 // We fill the two buffers 5077 if(r_cas_cpt.read() < 2) // 32 bits access 5078 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 5079 5080 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 5081 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5082 5083 if(r_cas_cpt.read() >3) // more than 4 flits... 5084 { 5085 std::cout << "VCI_MEM_CACHE ERROR in CAS_IDLE state : illegal CAS command" 5086 << std::endl; 5087 exit(0); 5088 } 5089 5090 if(r_cas_cpt.read() ==2) 5091 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 5092 5093 r_cas_cpt = r_cas_cpt.read() +1; 5094 } 5095 break; 5096 } 5097 4961 if(m_debug) 4962 std::cout << " <MEMC " << name() << " CAS_IDLE> CAS command: " << std::hex 4963 << " srcid = " << std::dec << m_cmd_cas_srcid_fifo.read() 4964 << " addr = " << std::hex << m_cmd_cas_addr_fifo.read() 4965 << " wdata = " << m_cmd_cas_wdata_fifo.read() 4966 << " eop = " << std::dec << m_cmd_cas_eop_fifo.read() 4967 << " cpt = " << std::dec << r_cas_cpt.read() << std::endl; 4968 #endif 4969 if(m_cmd_cas_eop_fifo.read()) 4970 { 4971 m_cpt_cas++; 4972 r_cas_fsm = CAS_DIR_REQ; 4973 } 4974 else // we keep the last word in the FIFO 4975 { 4976 cmd_cas_fifo_get = true; 4977 } 4978 4979 // We fill the two buffers 4980 if(r_cas_cpt.read() < 2) // 32 bits access 4981 r_cas_rdata[r_cas_cpt.read()] = m_cmd_cas_wdata_fifo.read(); 4982 4983 if((r_cas_cpt.read() == 1) and m_cmd_cas_eop_fifo.read()) 4984 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 4985 4986 assert( (r_cas_cpt.read() <= 3) and // no more than 4 flits... 
4987 "MEMC ERROR in CAS_IDLE state: illegal CAS command"); 4988 4989 if(r_cas_cpt.read() ==2) 4990 r_cas_wdata = m_cmd_cas_wdata_fifo.read(); 4991 4992 r_cas_cpt = r_cas_cpt.read() +1; 4993 } 4994 break; 4995 } 5098 4996 ///////////////// 5099 4997 case CAS_DIR_REQ: 5100 4998 { 5101 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS)5102 {5103 r_cas_fsm = CAS_DIR_LOCK;5104 }4999 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 5000 { 5001 r_cas_fsm = CAS_DIR_LOCK; 5002 } 5105 5003 5106 5004 #if DEBUG_MEMC_CAS 5107 if(m_debug) 5108 { 5109 std::cout 5110 << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " 5111 << std::endl; 5112 } 5113 #endif 5114 break; 5115 } 5116 5005 if(m_debug) 5006 std::cout << " <MEMC " << name() << " CAS_DIR_REQ> Requesting DIR lock " << std::endl; 5007 #endif 5008 break; 5009 } 5117 5010 ///////////////// 5118 5011 case CAS_DIR_LOCK: // Read the directory 5119 5012 { 5120 if(r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) 5121 { 5013 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5014 "MEMC ERROR in CAS_DIR_LOCK: Bad DIR allocation"); 5015 5122 5016 size_t way = 0; 5123 5017 DirectoryEntry entry(m_cache_directory.read(m_cmd_cas_addr_fifo.read(), way)); … … 5128 5022 r_cas_way = way; 5129 5023 r_cas_copy = entry.owner.srcid; 5130 #if L1_MULTI_CACHE5131 r_cas_copy_cache = entry.owner.cache_id;5132 #endif5133 5024 r_cas_copy_inst = entry.owner.inst; 5134 5025 r_cas_ptr = entry.ptr; … … 5139 5030 5140 5031 #if DEBUG_MEMC_CAS 5141 if(m_debug) 5142 { 5143 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 5144 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5145 << " / hit = " << std::dec << entry.valid 5146 << " / count = " << entry.count 5147 << " / is_cnt = " << entry.is_cnt << std::endl; 5148 } 5149 #endif 5150 } 5151 else 5152 { 5153 std::cout 5154 << "VCI_MEM_CACHE ERROR " << name() 5155 << " CAS_DIR_LOCK state" << std::endl 5156 << "Bad DIR allocation" << std::endl; 5157 5158 exit(0); 5159 } 5160 5161 break; 5032 if(m_debug) 5033 std::cout << " <MEMC " << name() << " CAS_DIR_LOCK> Directory acces" 5034 << " / address = " << std::hex << m_cmd_cas_addr_fifo.read() 5035 << " / hit = " << std::dec << entry.valid 5036 << " / count = " << entry.count 5037 << " / is_cnt = " << entry.is_cnt << std::endl; 5038 #endif 5039 5040 break; 5162 5041 } 5163 5042 ///////////////////// … … 5165 5044 // and check data change in cache 5166 5045 { 5167 size_t way = r_cas_way.read();5168 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())];5169 5170 // update directory (lock & dirty bits)5171 DirectoryEntry entry;5172 entry.valid = true; 5173 entry.is_cnt = r_cas_is_cnt.read();5174 entry.dirty = true;5175 entry.lock= true;5176 entry.tag = r_cas_tag.read();5177 entry.owner.srcid = r_cas_copy.read();5178 #if L1_MULTI_CACHE 5179 entry.owner.cache_id = r_cas_copy_cache.read();5180 #endif 5181 entry.owner.inst = r_cas_copy_inst.read();5182 entry.count = r_cas_count.read();5183 entry.ptr = r_cas_ptr.read();5184 5185 m_cache_directory.write(set, way, entry);5186 5187 // Storeddata from cache in buffer to do the comparison in next state5188 m_cache_data.read_line(way, set, r_cas_data);5189 5190 r_cas_fsm = CAS_DIR_HIT_COMPARE;5046 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5047 "MEMC ERROR in CAS_DIR_HIT_READ: Bad DIR allocation"); 5048 5049 size_t way = r_cas_way.read(); 5050 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5051 5052 // update directory (lock & dirty bits) 5053 DirectoryEntry entry; 5054 entry.valid = true; 5055 entry.is_cnt = 
r_cas_is_cnt.read(); 5056 entry.dirty = true; 5057 entry.lock = true; 5058 entry.tag = r_cas_tag.read(); 5059 entry.owner.srcid = r_cas_copy.read(); 5060 entry.owner.inst = r_cas_copy_inst.read(); 5061 entry.count = r_cas_count.read(); 5062 entry.ptr = r_cas_ptr.read(); 5063 5064 m_cache_directory.write(set, way, entry); 5065 5066 // Store data from cache in buffer to do the comparison in next state 5067 m_cache_data.read_line(way, set, r_cas_data); 5068 5069 r_cas_fsm = CAS_DIR_HIT_COMPARE; 5191 5070 5192 5071 #if DEBUG_MEMC_CAS … … 5195 5074 << " cache and store it in buffer" << std::endl; 5196 5075 #endif 5197 break;5198 } 5199 5076 break; 5077 } 5078 //////////////////////// 5200 5079 case CAS_DIR_HIT_COMPARE: 5201 5080 { 5202 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())];5203 5204 // Read data in buffer &check data change5081 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5082 5083 // check data change 5205 5084 bool ok = (r_cas_rdata[0].read() == r_cas_data[word].read()); 5206 5085 … … 5212 5091 r_cas_lfsr = (r_cas_lfsr >> 1) ^ ((- (r_cas_lfsr & 1)) & 0xd0000001); 5213 5092 5214 // cas success 5215 if(ok and not forced_fail) 5216 { 5217 r_cas_fsm = CAS_DIR_HIT_WRITE; 5218 } 5219 // cas failure 5220 else 5221 { 5222 r_cas_fsm = CAS_RSP_FAIL; 5223 } 5093 if(ok and not forced_fail) r_cas_fsm = CAS_DIR_HIT_WRITE; 5094 else r_cas_fsm = CAS_RSP_FAIL; 5224 5095 5225 5096 #if DEBUG_MEMC_CAS 5226 5097 if(m_debug) 5227 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare the old" 5228 << " and the new data" 5229 << " / expected value = " << r_cas_rdata[0].read() 5230 << " / actual value = " << r_cas_data[word].read() 5231 << " / forced_fail = " << forced_fail << std::endl; 5098 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_COMPARE> Compare old and new data" 5099 << " / expected value = " << std::hex << r_cas_rdata[0].read() 5100 << " / actual value = " << std::hex << r_cas_data[word].read() 5101 << " / forced_fail = " << std::dec << forced_fail << std::endl; 5232 5102 #endif 5233 5103 break; … … 5235 5105 ////////////////////// 5236 5106 case CAS_DIR_HIT_WRITE: // test if a CC transaction is required 5237 // write data in cache if no CC request 5238 { 5239 // The CAS is a success => sw access to the llsc_global_table 5240 m_llsc_table.sw(m_nline[(addr_t)m_cmd_cas_addr_fifo.read()],m_x[(addr_t)(m_cmd_cas_addr_fifo.read())],m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]); 5241 5242 // test coherence request 5243 if(r_cas_count.read()) // replicated line 5244 { 5245 if(r_cas_is_cnt.read()) 5246 { 5247 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 5248 } 5249 else if(!r_cas_to_cc_send_multi_req.read() and 5250 !r_cas_to_cc_send_brdcast_req.read()) 5251 { 5252 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5253 } 5254 else 5255 { 5256 r_cas_fsm = CAS_WAIT; 5257 } 5258 } 5259 else // no copies 5260 { 5261 size_t way = r_cas_way.read(); 5262 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5263 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5264 5265 // cache update 5266 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5267 if(r_cas_cpt.read() == 4) 5268 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5269 5270 r_cas_fsm = CAS_RSP_SUCCESS; 5271 5272 // monitor 5273 if(m_monitor_ok) 5274 { 5275 addr_t address = m_cmd_cas_addr_fifo.read(); 5276 check_monitor( address, r_cas_wdata.read(), false); 5277 5278 if(r_cas_cpt.read() == 4) 5279 check_monitor( address+4, m_cmd_cas_wdata_fifo.read(), false); 5280 } 
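The forced_fail tested in CAS_DIR_HIT_COMPARE comes from the 32-bit Galois LFSR recurrence shown above, lfsr = (lfsr >> 1) ^ ((-(lfsr & 1)) & 0xd0000001). A stand-alone sketch of that generator (the 1/64 failure ratio below is an assumption; the actual threshold is defined elsewhere in the component):

#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t lfsr = 0xFFFFFFFFu;            // any non-zero seed
    for (int i = 0; i < 8; ++i)
    {
        // shift right and XOR the tap mask when the bit shifted out is 1
        lfsr = (lfsr >> 1) ^ ((-(lfsr & 1)) & 0xd0000001);

        // occasionally force a CAS failure for test purposes
        bool forced_fail = ((lfsr % 64) == 0);
        std::printf("lfsr = 0x%08x  forced_fail = %d\n", lfsr, (int) forced_fail);
    }
    return 0;
}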
5107 // write data in cache if no CC request 5108 { 5109 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5110 "MEMC ERROR in CAS_DIR_HIT_WRITE: Bad DIR allocation"); 5111 5112 // The CAS is a success => sw access to the llsc_global_table 5113 m_llsc_table.sw( m_nline[(addr_t)m_cmd_cas_addr_fifo.read()], 5114 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())], 5115 m_x[(addr_t)(m_cmd_cas_addr_fifo.read())] ); 5116 5117 // test coherence request 5118 if(r_cas_count.read()) // replicated line 5119 { 5120 if(r_cas_is_cnt.read()) 5121 { 5122 r_cas_fsm = CAS_BC_TRT_LOCK; // broadcast invalidate required 5123 5124 #if DEBUG_MEMC_CAS 5125 if(m_debug) 5126 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5127 << " Broacast Inval required" 5128 << " / copies = " << r_cas_count.read() << std::endl; 5129 #endif 5130 } 5131 else if( not r_cas_to_cc_send_multi_req.read() and 5132 not r_cas_to_cc_send_brdcast_req.read() ) 5133 { 5134 r_cas_fsm = CAS_UPT_LOCK; // multi update required 5135 5136 #if DEBUG_MEMC_CAS 5137 if(m_debug) 5138 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5139 << " Multi Inval required" 5140 << " / copies = " << r_cas_count.read() << std::endl; 5141 #endif 5142 } 5143 else 5144 { 5145 r_cas_fsm = CAS_WAIT; 5146 5147 #if DEBUG_MEMC_CAS 5148 if(m_debug) 5149 std::cout << " <MEMC " << name() << " CAS_DIR_HIT_WRITE>" 5150 << " CC_SEND FSM busy: release all locks and retry" << std::endl; 5151 #endif 5152 } 5153 } 5154 else // no copies 5155 { 5156 size_t way = r_cas_way.read(); 5157 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5158 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5159 5160 // cache update 5161 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5162 if(r_cas_cpt.read() == 4) 5163 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5164 5165 r_cas_fsm = CAS_RSP_SUCCESS; 5281 5166 5282 5167 #if DEBUG_MEMC_CAS … … 5295 5180 ///////////////// 5296 5181 case CAS_UPT_LOCK: // try to register the transaction in UPT 5297 // and write data in cache if successful registration 5298 // releases locks to retry later if UPT full 5299 { 5300 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 5301 { 5302 bool wok = false; 5303 size_t index = 0; 5304 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5305 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5306 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5307 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5308 size_t nb_copies = r_cas_count.read(); 5309 5310 wok = m_upt.set(true, // it's an update transaction 5311 false, // it's not a broadcast 5312 true, // response required 5313 false, // no acknowledge required 5314 srcid, 5315 trdid, 5316 pktid, 5317 nline, 5318 nb_copies, 5319 index); 5320 if(wok) // coherence transaction registered in UPT 5321 { 5322 // cache update 5323 size_t way = r_cas_way.read(); 5324 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5325 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5326 5327 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5328 if(r_cas_cpt.read() ==4) 5329 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5330 5331 r_cas_upt_index = index; 5332 r_cas_fsm = CAS_UPT_HEAP_LOCK; 5333 5334 // monitor 5335 if(m_monitor_ok) 5336 { 5337 addr_t address = m_cmd_cas_addr_fifo.read(); 5338 check_monitor( address, r_cas_wdata.read(), false); 5339 5340 if(r_cas_cpt.read() ==4) 5341 check_monitor( address+4, m_cmd_cas_wdata_fifo.read(), false); 5342 } 5343 } 5344 else // releases the locks protecting UPT and DIR 
UPT full 5345 { 5346 r_cas_fsm = CAS_WAIT; 5347 } 5182 // and write data in cache if successful registration 5183 // releases locks to retry later if UPT full 5184 { 5185 if(r_alloc_upt_fsm.read() == ALLOC_UPT_CAS) 5186 { 5187 bool wok = false; 5188 size_t index = 0; 5189 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5190 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5191 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5192 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5193 size_t nb_copies = r_cas_count.read(); 5194 5195 wok = m_upt.set( true, // it's an update transaction 5196 false, // it's not a broadcast 5197 true, // response required 5198 false, // no acknowledge required 5199 srcid, 5200 trdid, 5201 pktid, 5202 nline, 5203 nb_copies, 5204 index); 5205 if(wok) // coherence transaction registered in UPT 5206 { 5207 // cache update 5208 size_t way = r_cas_way.read(); 5209 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5210 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5211 5212 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5213 if(r_cas_cpt.read() ==4) 5214 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5215 5216 r_cas_upt_index = index; 5217 r_cas_fsm = CAS_UPT_HEAP_LOCK; 5218 } 5219 else // releases the locks protecting UPT and DIR UPT full 5220 { 5221 r_cas_fsm = CAS_WAIT; 5222 } 5348 5223 5349 5224 #if DEBUG_MEMC_CAS … … 5352 5227 << " CAS_UPT_LOCK> Register multi-update transaction in UPT" 5353 5228 << " / wok = " << wok 5354 << " / nline = " << std::hex << nline5229 << " / address = " << std::hex << nline*m_words*4 5355 5230 << " / count = " << nb_copies << std::endl; 5356 5231 #endif 5357 }5358 break;5232 } 5233 break; 5359 5234 } 5360 5235 ///////////// … … 5363 5238 5364 5239 #if DEBUG_MEMC_CAS 5365 if(m_debug) 5366 { 5367 std::cout << " <MEMC " << name() 5368 << " CAS_WAIT> Release all locks" << std::endl; 5369 } 5370 #endif 5371 r_cas_fsm = CAS_DIR_REQ; 5372 break; 5373 } 5374 ////////////////// 5240 if(m_debug) 5241 std::cout << " <MEMC " << name() << " CAS_WAIT> Release all locks" << std::endl; 5242 #endif 5243 r_cas_fsm = CAS_DIR_REQ; 5244 break; 5245 } 5246 ////////////////////// 5375 5247 case CAS_UPT_HEAP_LOCK: // lock the heap 5376 5248 { … … 5418 5290 cas_to_cc_send_fifo_inst = r_cas_copy_inst.read(); 5419 5291 cas_to_cc_send_fifo_srcid = r_cas_copy.read(); 5420 #if L1_MULTI_CACHE5421 cas_to_cc_send_fifo_cache_id= r_cas_copy_cache.read();5422 #endif5423 5292 if(r_cas_count.read() == 1) // one single copy 5424 5293 { … … 5455 5324 HeapEntry entry = m_heap.read(r_cas_ptr.read()); 5456 5325 cas_to_cc_send_fifo_srcid = entry.owner.srcid; 5457 #if L1_MULTI_CACHE5458 cas_to_cc_send_fifo_cache_id = entry.owner.cache_id;5459 #endif5460 5326 cas_to_cc_send_fifo_inst = entry.owner.inst; 5461 5327 cas_to_cc_send_fifo_put = true; … … 5487 5353 } 5488 5354 ///////////////////// 5489 case CAS_BC_TRT_LOCK: // check the TRT to register a PUT transaction 5490 { 5491 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5492 { 5493 if(!r_cas_to_ixr_cmd_req) // we can transfer the request to IXR_CMD FSM 5494 { 5495 // fill the data buffer 5496 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5497 for(size_t i = 0; i<m_words; i++) 5498 { 5499 if(i == word) 5355 case CAS_BC_TRT_LOCK: // get TRT lock to check TRT not full 5356 { 5357 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5358 "MEMC ERROR in CAS_BC_TRT_LOCK state: Bas DIR allocation"); 5359 5360 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5361 { 5362 size_t wok_index 
= 0; 5363 bool wok = !m_trt.full(wok_index); 5364 if( wok ) 5500 5365 { 5501 r_cas_to_ixr_cmd_data[i] = r_cas_wdata.read(); 5502 } 5503 else if((i == word+1) and (r_cas_cpt.read() == 4)) // 64 bit CAS 5504 { 5505 r_cas_to_ixr_cmd_data[i] = m_cmd_cas_wdata_fifo.read(); 5366 r_cas_trt_index = wok_index; 5367 r_cas_fsm = CAS_BC_IVT_LOCK; 5506 5368 } 5507 5369 else 5508 5370 { 5509 r_cas_to_ixr_cmd_data[i] = r_cas_data[i].read();5371 r_cas_fsm = CAS_WAIT; 5510 5372 } 5511 } 5512 size_t wok_index = 0; 5513 bool wok = !m_trt.full(wok_index); 5514 if(wok) 5515 { 5516 r_cas_trt_index = wok_index; 5517 r_cas_fsm = CAS_BC_IVT_LOCK; 5518 } 5519 else 5520 { 5521 r_cas_fsm = CAS_WAIT; 5522 } 5523 } 5524 else 5525 { 5526 r_cas_fsm = CAS_WAIT; 5527 } 5528 } 5529 break; 5373 5374 #if DEBUG_MEMC_CAS 5375 if(m_debug) 5376 std::cout << " <MEMC " << name() << " CAS_BC_TRT_LOCK> Check TRT" 5377 << " : wok = " << wok << " / index = " << wok_index << std::endl; 5378 #endif 5379 } 5380 break; 5530 5381 } 5531 5382 ///////////////////// 5532 case CAS_BC_IVT_LOCK: // register a broadcast inval transaction in IVT 5533 // write data in cache in case of successful registration 5534 { 5535 if(r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) 5536 { 5537 bool wok = false; 5538 size_t index = 0; 5539 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5540 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5541 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5542 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5543 size_t nb_copies = r_cas_count.read(); 5544 5545 // register a broadcast inval transaction in IVT 5546 wok = m_ivt.set(false, // it's an inval transaction 5547 true, // it's a broadcast 5548 true, // response required 5549 false, // no acknowledge required 5550 srcid, 5551 trdid, 5552 pktid, 5553 nline, 5554 nb_copies, 5555 index); 5556 5557 if(wok) // IVT not full 5558 { 5559 // cache update 5560 size_t way = r_cas_way.read(); 5561 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5562 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5563 5564 m_cache_data.write(way, set, word, r_cas_wdata.read()); 5565 if(r_cas_cpt.read() ==4) 5566 m_cache_data.write(way, set, word+1, m_cmd_cas_wdata_fifo.read()); 5567 5568 // monitor 5569 if(m_monitor_ok) 5570 { 5571 addr_t address = m_cmd_cas_addr_fifo.read(); 5572 check_monitor( address, r_cas_wdata.read(), false); 5573 5574 if(r_cas_cpt.read() ==4) 5575 check_monitor( address+4, m_cmd_cas_wdata_fifo.read(), false); 5576 } 5577 r_cas_upt_index = index; 5578 r_cas_fsm = CAS_BC_DIR_INVAL; 5579 5383 case CAS_BC_IVT_LOCK: // get IVT lock and register BC transaction in IVT 5384 { 5385 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5386 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas DIR allocation"); 5387 5388 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5389 "MEMC ERROR in CAS_BC_IVT_LOCK state: Bas TRT allocation"); 5390 5391 if( r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS ) 5392 { 5393 // register broadcast inval transaction in IVT 5394 bool wok = false; 5395 size_t index = 0; 5396 size_t srcid = m_cmd_cas_srcid_fifo.read(); 5397 size_t trdid = m_cmd_cas_trdid_fifo.read(); 5398 size_t pktid = m_cmd_cas_pktid_fifo.read(); 5399 addr_t nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5400 size_t nb_copies = r_cas_count.read(); 5401 5402 wok = m_ivt.set( false, // it's an inval transaction 5403 true, // it's a broadcast 5404 true, // response required 5405 false, // no acknowledge required 5406 srcid, 5407 trdid, 5408 pktid, 5409 nline, 5410 nb_copies, 5411 index); 
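The m_ivt.set() call above registers the broadcast invalidate and reports both success (wok) and the allocated index; m_trt.set() follows the same pattern. A reduced sketch of that allocate-and-return-index idiom (InvalTable and its fields are illustrative, not the real table interface):

#include <vector>
#include <cstddef>
#include <iostream>

// one entry, reduced to the essentials
struct InvalEntry
{
    bool        valid;
    bool        broadcast;
    std::size_t srcid;
    std::size_t count;      // expected CLEANUP responses
};

class InvalTable
{
    std::vector<InvalEntry> m_tab;
public:
    explicit InvalTable(std::size_t n)
        : m_tab(n, InvalEntry{false, false, 0, 0}) {}

    // returns false when no free entry is available (table full),
    // otherwise fills the entry and returns its index by reference
    bool set(bool broadcast, std::size_t srcid, std::size_t count, std::size_t &index)
    {
        for (std::size_t i = 0; i < m_tab.size(); ++i)
        {
            if (not m_tab[i].valid)
            {
                m_tab[i] = InvalEntry{true, broadcast, srcid, count};
                index    = i;
                return true;
            }
        }
        return false;
    }
};

int main()
{
    InvalTable ivt(2);
    std::size_t index = 0;
    bool wok = ivt.set(true /*broadcast*/, 3 /*srcid*/, 5 /*copies*/, index);
    std::cout << "wok = " << wok << " / index = " << index << std::endl;
    return 0;
}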
5580 5412 #if DEBUG_MEMC_CAS 5581 if(m_debug) 5582 std::cout << " <MEMC " << name() 5583 << " CAS_BC_IVT_LOCK> Register a broadcast inval transaction in IVT" 5584 << " / nline = " << std::hex << nline 5585 << " / count = " << std::dec << nb_copies 5586 << " / ivt_index = " << index << std::endl; 5587 #endif 5588 } 5589 else // releases the lock protecting IVT 5590 { 5591 r_cas_fsm = CAS_WAIT; 5592 } 5593 } 5594 break; 5413 if( m_debug and wok ) 5414 std::cout << " <MEMC " << name() << " CAS_BC_IVT_LOCK> Register broadcast inval in IVT" 5415 << " / copies = " << r_cas_count.read() << std::endl; 5416 #endif 5417 r_cas_upt_index = index; 5418 if( wok ) r_cas_fsm = CAS_BC_DIR_INVAL; 5419 else r_cas_fsm = CAS_WAIT; 5420 } 5421 break; 5595 5422 } 5596 5423 ////////////////////// 5597 case CAS_BC_DIR_INVAL: // Register the PUT transaction in TRT, and inval the DIR entry 5598 { 5599 if((r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5600 (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 5601 (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS)) 5602 { 5424 case CAS_BC_DIR_INVAL: // Register PUT transaction in TRT, 5425 // and inval the DIR entry 5426 { 5427 assert( (r_alloc_dir_fsm.read() == ALLOC_DIR_CAS) and 5428 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad DIR allocation"); 5429 5430 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5431 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad TRT allocation"); 5432 5433 assert( (r_alloc_ivt_fsm.read() == ALLOC_IVT_CAS) and 5434 "MEMC ERROR in CAS_BC_DIR_INVAL state: Bad IVT allocation"); 5435 5603 5436 // set TRT 5604 m_trt.set(r_cas_trt_index.read(), 5605 false, // PUT request to XRAM 5606 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 5607 0, 5608 0, 5609 0, 5610 false, // not a processor read 5611 0, 5612 0, 5613 std::vector<be_t> (m_words,0), 5614 std::vector<data_t> (m_words,0)); 5437 std::vector<data_t> data_vector; 5438 data_vector.clear(); 5439 size_t word = m_x[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5440 for(size_t i=0; i<m_words; i++) 5441 { 5442 if(i == word) // first modified word 5443 data_vector.push_back( r_cas_wdata.read() ); 5444 else if((i == word+1) and (r_cas_cpt.read() == 4)) // second modified word 5445 data_vector.push_back( m_cmd_cas_wdata_fifo.read() ); 5446 else // unmodified words 5447 data_vector.push_back( r_cas_data[i].read() ); 5448 } 5449 m_trt.set( r_cas_trt_index.read(), 5450 false, // PUT request 5451 m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())], 5452 0, 5453 0, 5454 0, 5455 false, // not a processor read 5456 0, 5457 0, 5458 std::vector<be_t> (m_words,0), 5459 data_vector ); 5615 5460 5616 5461 // invalidate directory entry … … 5618 5463 entry.valid = false; 5619 5464 entry.dirty = false; 5620 entry.tag = 0;5465 entry.tag = 0; 5621 5466 entry.is_cnt = false; 5622 5467 entry.lock = false; 5623 5468 entry.count = 0; 5624 5469 entry.owner.srcid = 0; 5625 #if L1_MULTI_CACHE5626 entry.owner.cache_id= 0;5627 #endif5628 5470 entry.owner.inst = false; 5629 5471 entry.ptr = 0; 5630 5472 size_t set = m_y[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5631 5473 size_t way = r_cas_way.read(); 5474 5632 5475 m_cache_directory.write(set, way, entry); 5633 5476 … … 5636 5479 #if DEBUG_MEMC_CAS 5637 5480 if(m_debug) 5638 std::cout << " <MEMC " << name() 5639 << " CAS_BC_DIR_INVAL> Register the PUT in TRT and invalidate DIR entry" 5640 << " / nline = " << std::hex << m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())] 5641 << " / set = " << std::dec << set << " / way = " << way << std::endl; 5642 #endif 5643 } 5644 else 5645 { 5646 assert(false and "LOCK ERROR in 
CAS_FSM, STATE = CAS_BC_DIR_INVAL"); 5647 } 5648 break; 5481 std::cout << " <MEMC " << name() << " CAS_BC_DIR_INVAL> Inval DIR & register in TRT:" 5482 << " address = " << m_cmd_cas_addr_fifo.read() << std::endl; 5483 #endif 5484 break; 5649 5485 } 5650 5486 /////////////////// 5651 5487 case CAS_BC_CC_SEND: // Request the broadcast inval to CC_SEND FSM 5652 5488 { 5653 if(!r_cas_to_cc_send_multi_req.read() and 5654 !r_cas_to_cc_send_brdcast_req.read()) 5655 { 5656 r_cas_to_cc_send_multi_req = false; 5657 r_cas_to_cc_send_brdcast_req = true; 5658 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5659 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5660 r_cas_to_cc_send_index = 0; 5661 r_cas_to_cc_send_wdata = 0; 5662 5663 r_cas_fsm = CAS_BC_XRAM_REQ; 5664 } 5665 break; 5489 if( not r_cas_to_cc_send_multi_req.read() and 5490 not r_cas_to_cc_send_brdcast_req.read() ) 5491 { 5492 r_cas_to_cc_send_multi_req = false; 5493 r_cas_to_cc_send_brdcast_req = true; 5494 r_cas_to_cc_send_trdid = r_cas_upt_index.read(); 5495 r_cas_to_cc_send_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5496 r_cas_to_cc_send_index = 0; 5497 r_cas_to_cc_send_wdata = 0; 5498 5499 r_cas_fsm = CAS_BC_XRAM_REQ; 5500 5501 #if DEBUG_MEMC_CAS 5502 if(m_debug) 5503 std::cout << " <MEMC " << name() 5504 << " CAS_BC_CC_SEND> Post a broadcast request to CC_SEND FSM" << std::endl; 5505 #endif 5506 } 5507 break; 5666 5508 } 5667 5509 //////////////////// 5668 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a put transaction 5669 { 5670 if(!r_cas_to_ixr_cmd_req) 5671 { 5672 r_cas_to_ixr_cmd_req = true; 5673 r_cas_to_ixr_cmd_write = true; 5674 r_cas_to_ixr_cmd_nline = m_nline[(addr_t)(m_cmd_cas_addr_fifo.read())]; 5675 r_cas_to_ixr_cmd_trdid = r_cas_trt_index.read(); 5676 r_cas_fsm = CAS_IDLE; 5677 cmd_cas_fifo_get = true; 5678 r_cas_cpt = 0; 5510 case CAS_BC_XRAM_REQ: // request the IXR FSM to start a PUT transaction 5511 { 5512 if( not r_cas_to_ixr_cmd_req.read() ) 5513 { 5514 r_cas_to_ixr_cmd_req = true; 5515 r_cas_to_ixr_cmd_put = true; 5516 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 5517 r_cas_fsm = CAS_IDLE; 5518 cmd_cas_fifo_get = true; 5519 r_cas_cpt = 0; 5679 5520 5680 5521 #if DEBUG_MEMC_CAS … … 5682 5523 std::cout << " <MEMC " << name() 5683 5524 << " CAS_BC_XRAM_REQ> Request a PUT transaction to IXR_CMD FSM" << std::hex 5684 << " / nline = " << m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]5525 << " / address = " << (addr_t) m_cmd_cas_addr_fifo.read() 5685 5526 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 5686 5527 #endif 5687 } 5688 else 5689 { 5690 std::cout << "ERROR in MEM_CACHE / CAS_BC_XRAM_REQ state" 5691 << " : request should not have been previously set" << std::endl; 5692 } 5693 break; 5528 } 5529 break; 5694 5530 } 5695 5531 ///////////////// 5696 5532 case CAS_RSP_FAIL: // request TGT_RSP FSM to send a failure response 5697 5533 { 5698 if(!r_cas_to_tgt_rsp_req)5699 {5700 cmd_cas_fifo_get= true;5701 r_cas_cpt = 0;5702 r_cas_to_tgt_rsp_req= true;5703 r_cas_to_tgt_rsp_data = 1;5704 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read();5705 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read();5706 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read();5707 r_cas_fsm = CAS_IDLE;5534 if( not r_cas_to_tgt_rsp_req.read() ) 5535 { 5536 cmd_cas_fifo_get = true; 5537 r_cas_cpt = 0; 5538 r_cas_to_tgt_rsp_req = true; 5539 r_cas_to_tgt_rsp_data = 1; 5540 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 5541 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 5542 
r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 5543 r_cas_fsm = CAS_IDLE; 5708 5544 5709 5545 #if DEBUG_MEMC_CAS … … 5712 5548 << " CAS_RSP_FAIL> Request TGT_RSP to send a failure response" << std::endl; 5713 5549 #endif 5714 }5715 break;5550 } 5551 break; 5716 5552 } 5717 5553 //////////////////// 5718 5554 case CAS_RSP_SUCCESS: // request TGT_RSP FSM to send a success response 5719 5555 { 5720 if(!r_cas_to_tgt_rsp_req)5721 {5722 cmd_cas_fifo_get = true;5723 r_cas_cpt = 0;5724 r_cas_to_tgt_rsp_req= true;5725 r_cas_to_tgt_rsp_data = 0;5726 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read();5727 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read();5728 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read();5729 r_cas_fsm = CAS_IDLE;5556 if( not r_cas_to_tgt_rsp_req.read() ) 5557 { 5558 cmd_cas_fifo_get = true; 5559 r_cas_cpt = 0; 5560 r_cas_to_tgt_rsp_req = true; 5561 r_cas_to_tgt_rsp_data = 0; 5562 r_cas_to_tgt_rsp_srcid = m_cmd_cas_srcid_fifo.read(); 5563 r_cas_to_tgt_rsp_trdid = m_cmd_cas_trdid_fifo.read(); 5564 r_cas_to_tgt_rsp_pktid = m_cmd_cas_pktid_fifo.read(); 5565 r_cas_fsm = CAS_IDLE; 5730 5566 5731 5567 #if DEBUG_MEMC_CAS … … 5734 5570 << " CAS_RSP_SUCCESS> Request TGT_RSP to send a success response" << std::endl; 5735 5571 #endif 5736 }5737 break;5738 } 5739 ///////////////////// 5572 } 5573 break; 5574 } 5575 /////////////////////// 5740 5576 case CAS_MISS_TRT_LOCK: // cache miss : request access to transaction Table 5741 5577 { 5742 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS)5743 {5744 size_t index = 0;5745 bool hit_read= m_trt.hit_read(5746 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index);5747 bool hit_write = m_trt.hit_write(5748 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]);5749 bool wok = !m_trt.full(index);5578 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5579 { 5580 size_t index = 0; 5581 bool hit_read = m_trt.hit_read( 5582 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()],index); 5583 bool hit_write = m_trt.hit_write( 5584 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]); 5585 bool wok = not m_trt.full(index); 5750 5586 5751 5587 #if DEBUG_MEMC_CAS 5752 if(m_debug) 5753 { 5754 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 5755 << " / hit_read = " << hit_read 5756 << " / hit_write = " << hit_write 5757 << " / wok = " << wok 5758 << " / index = " << index << std::endl; 5759 } 5760 #endif 5761 5762 if(hit_read or !wok or hit_write) // missing line already requested or no space in TRT 5763 { 5764 r_cas_fsm = CAS_WAIT; 5765 } 5766 else 5767 { 5768 r_cas_trt_index = index; 5769 r_cas_fsm = CAS_MISS_TRT_SET; 5770 } 5771 } 5772 break; 5773 } 5774 //////////////////// 5588 if(m_debug) 5589 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_LOCK> Check TRT state" 5590 << " / hit_read = " << hit_read 5591 << " / hit_write = " << hit_write 5592 << " / wok = " << wok 5593 << " / index = " << index << std::endl; 5594 #endif 5595 5596 if(hit_read or !wok or hit_write) // missing line already requested or TRT full 5597 { 5598 r_cas_fsm = CAS_WAIT; 5599 } 5600 else 5601 { 5602 r_cas_trt_index = index; 5603 r_cas_fsm = CAS_MISS_TRT_SET; 5604 } 5605 } 5606 break; 5607 } 5608 ////////////////////// 5775 5609 case CAS_MISS_TRT_SET: // register the GET transaction in TRT 5776 5610 { 5777 if(r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) 5778 { 5611 assert( (r_alloc_trt_fsm.read() == ALLOC_TRT_CAS) and 5612 "MEMC ERROR in CAS_MISS_TRT_SET state: Bad TRT allocation"); 5613 5779 5614 std::vector<be_t> be_vector; 5780 5615 std::vector<data_t> data_vector; … … 5787 5622 } 
5788 5623 5789 m_trt.set(r_cas_trt_index.read(), 5790 true, // read request 5791 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 5792 m_cmd_cas_srcid_fifo.read(), 5793 m_cmd_cas_trdid_fifo.read(), 5794 m_cmd_cas_pktid_fifo.read(), 5795 false, // write request from processor 5796 0, 5797 0, 5798 be_vector, 5799 data_vector); 5624 m_trt.set( r_cas_trt_index.read(), 5625 true, // GET 5626 m_nline[(addr_t) m_cmd_cas_addr_fifo.read()], 5627 m_cmd_cas_srcid_fifo.read(), 5628 m_cmd_cas_trdid_fifo.read(), 5629 m_cmd_cas_pktid_fifo.read(), 5630 false, // write request from processor 5631 0, 5632 0, 5633 std::vector<be_t>(m_words,0), 5634 std::vector<data_t>(m_words,0) ); 5635 5800 5636 r_cas_fsm = CAS_MISS_XRAM_REQ; 5801 5637 5802 5638 #if DEBUG_MEMC_CAS 5803 if(m_debug) 5804 { 5805 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register a GET transaction in TRT" << std::hex 5806 << " / nline = " << m_nline[(addr_t) m_cmd_cas_addr_fifo.read()] 5807 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 5808 } 5809 #endif 5810 } 5811 break; 5639 if(m_debug) 5640 std::cout << " <MEMC " << name() << " CAS_MISS_TRT_SET> Register GET transaction in TRT" 5641 << " / address = " << std::hex << (addr_t)m_cmd_cas_addr_fifo.read() 5642 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 5643 #endif 5644 break; 5812 5645 } 5813 5646 ////////////////////// 5814 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM to fetch the missing line 5815 { 5816 if(!r_cas_to_ixr_cmd_req) 5817 { 5818 r_cas_to_ixr_cmd_req = true; 5819 r_cas_to_ixr_cmd_write = false; 5820 r_cas_to_ixr_cmd_trdid = r_cas_trt_index.read(); 5821 r_cas_to_ixr_cmd_nline = m_nline[(addr_t) m_cmd_cas_addr_fifo.read()]; 5822 r_cas_fsm = CAS_WAIT; 5647 case CAS_MISS_XRAM_REQ: // request the IXR_CMD FSM a GET request 5648 { 5649 if( not r_cas_to_ixr_cmd_req.read() ) 5650 { 5651 r_cas_to_ixr_cmd_req = true; 5652 r_cas_to_ixr_cmd_put = false; 5653 r_cas_to_ixr_cmd_index = r_cas_trt_index.read(); 5654 r_cas_fsm = CAS_WAIT; 5823 5655 5824 5656 #if DEBUG_MEMC_CAS 5825 if(m_debug) 5826 { 5827 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction to IXR_CMD FSM" << std::hex 5828 << " / nline = " << m_nline[(addr_t) m_cmd_cas_addr_fifo.read()] 5829 << " / trt_index = " << r_cas_trt_index.read() << std::endl; 5830 } 5831 #endif 5832 } 5833 break; 5657 if(m_debug) 5658 std::cout << " <MEMC " << name() << " CAS_MISS_XRAM_REQ> Request a GET transaction" 5659 << " / address = " << std::hex << (addr_t) m_cmd_cas_addr_fifo.read() 5660 << " / trt_index = " << std::dec << r_cas_trt_index.read() << std::endl; 5661 #endif 5662 } 5663 break; 5834 5664 } 5835 5665 } // end switch r_cas_fsm … … 5864 5694 /////////////////////////////////////////////////////////////////////////////// 5865 5695 5696 //std::cout << std::endl << "cc_send_fsm" << std::endl; 5697 5866 5698 switch(r_cc_send_fsm.read()) 5867 5699 { … … 6264 6096 if(m_debug) 6265 6097 std::cout << " <MEMC " << name() 6266 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for line"6267 << r_write_to_cc_send_nline.read() << std::endl;6098 << " CC_SEND_WRITE_UPDT_NLINE> Multicast-Update for address " 6099 << r_write_to_cc_send_nline.read()*m_words*4 << std::endl; 6268 6100 #endif 6269 6101 break; 6270 6102 } 6271 6103 ///////////////////////////// 6272 case CC_SEND_WRITE_UPDT_DATA: // send N data flits for amulti-update (from WRITE FSM)6104 case CC_SEND_WRITE_UPDT_DATA: // send data flits for multi-update (from WRITE FSM) 6273 6105 { 6274 6106 if(not 
p_dspin_m2p.read) break; … … 6302 6134 if(m_debug) 6303 6135 std::cout << " <MEMC " << name() 6304 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for line"6305 << r_cas_to_cc_send_nline.read() << std::endl;6136 << " CC_SEND_CAS_BRDCAST_NLINE> Broadcast-Inval for address: " 6137 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6306 6138 #endif 6307 6139 break; … … 6340 6172 if(m_debug) 6341 6173 std::cout << " <MEMC " << name() 6342 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for line"6343 << r_cas_to_cc_send_nline.read() << std::endl;6174 << " CC_SEND_CAS_UPDT_NLINE> Multicast-Update for address " 6175 << r_cas_to_cc_send_nline.read()*m_words*4 << std::endl; 6344 6176 #endif 6345 6177 break; … … 6361 6193 } 6362 6194 //////////////////////////////// 6363 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for amulti-update (from CAS FSM)6195 case CC_SEND_CAS_UPDT_DATA_HIGH: // send second data for multi-update (from CAS FSM) 6364 6196 { 6365 6197 if(not p_dspin_m2p.read) break; … … 6378 6210 ////////////////////////////////////////////////////////////////////////////// 6379 6211 6212 //std::cout << std::endl << "cc_receive_fsm" << std::endl; 6213 6380 6214 switch(r_cc_receive_fsm.read()) 6381 6215 { … … 6462 6296 } 6463 6297 } 6298 6464 6299 ////////////////////////////////////////////////////////////////////////// 6465 6300 // TGT_RSP FSM … … 6480 6315 ////////////////////////////////////////////////////////////////////////// 6481 6316 6317 //std::cout << std::endl << "tgt_rsp_fsm" << std::endl; 6318 6482 6319 switch(r_tgt_rsp_fsm.read()) 6483 6320 { … … 6675 6512 } 6676 6513 ///////////////////// 6677 case TGT_RSP_TGT_CMD: // send the response for a segmentation violation6514 case TGT_RSP_TGT_CMD: // send the response for a configuration access 6678 6515 { 6679 6516 if ( p_vci_tgt.rspack ) … … 6687 6524 std::cout 6688 6525 << " <MEMC " << name() 6689 << " TGT_RSP_TGT_CMD> Se gmentation violation response"6526 << " TGT_RSP_TGT_CMD> Send response for a configuration access" 6690 6527 << " / rsrcid = " << std::hex << r_tgt_cmd_to_tgt_rsp_srcid.read() 6691 6528 << " / rtrdid = " << r_tgt_cmd_to_tgt_rsp_trdid.read() 6692 6529 << " / rpktid = " << r_tgt_cmd_to_tgt_rsp_pktid.read() 6530 << " / error = " << r_tgt_cmd_to_tgt_rsp_error.read() 6693 6531 << std::endl; 6694 6532 } … … 6871 6709 // The resource is always allocated. 6872 6710 ///////////////////////////////////////////////////////////////////////////////////// 6711 6712 //std::cout << std::endl << "alloc_upt_fsm" << std::endl; 6713 6873 6714 switch(r_alloc_upt_fsm.read()) 6874 6715 { … … 6926 6767 // The resource is always allocated. 
6927 6768 ///////////////////////////////////////////////////////////////////////////////////// 6769 6770 //std::cout << std::endl << "alloc_ivt_fsm" << std::endl; 6771 6928 6772 switch(r_alloc_ivt_fsm.read()) 6929 6773 { 6930 ///////////////////// /////6774 ///////////////////// 6931 6775 case ALLOC_IVT_WRITE: // allocated to WRITE FSM 6932 6776 if (r_write_fsm.read() != WRITE_BC_IVT_LOCK) 6933 6777 { 6934 if (r_xram_rsp_fsm.read() == XRAM_RSP_I NVAL_LOCK)6778 if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6935 6779 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6936 6780 … … 6941 6785 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6942 6786 6943 else if (r_config_fsm.read() == CONFIG_ DIR_IVT_LOCK)6787 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6944 6788 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6945 6789 } 6946 6790 break; 6947 6791 6948 //////////////////////// //6792 //////////////////////// 6949 6793 case ALLOC_IVT_XRAM_RSP: // allocated to XRAM_RSP FSM 6950 if(r_xram_rsp_fsm.read() != XRAM_RSP_I NVAL_LOCK)6794 if(r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK) 6951 6795 { 6952 6796 if(r_cleanup_fsm.read() == CLEANUP_IVT_LOCK) … … 6956 6800 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6957 6801 6958 else if (r_config_fsm.read() == CONFIG_ DIR_IVT_LOCK)6802 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6959 6803 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6960 6804 … … 6964 6808 break; 6965 6809 6966 /////////////////////// ///6810 /////////////////////// 6967 6811 case ALLOC_IVT_CLEANUP: // allocated to CLEANUP FSM 6968 6812 if ((r_cleanup_fsm.read() != CLEANUP_IVT_LOCK ) and … … 6972 6816 r_alloc_ivt_fsm = ALLOC_IVT_CAS; 6973 6817 6974 else if (r_config_fsm.read() == CONFIG_ DIR_IVT_LOCK)6818 else if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6975 6819 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6976 6820 … … 6978 6822 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6979 6823 6980 else if (r_xram_rsp_fsm.read() == XRAM_RSP_I NVAL_LOCK)6824 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6981 6825 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6982 6826 } … … 6987 6831 if (r_cas_fsm.read() != CAS_BC_IVT_LOCK) 6988 6832 { 6989 if (r_config_fsm.read() == CONFIG_ DIR_IVT_LOCK)6833 if (r_config_fsm.read() == CONFIG_IVT_LOCK) 6990 6834 r_alloc_ivt_fsm = ALLOC_IVT_CONFIG; 6991 6835 … … 6993 6837 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 6994 6838 6995 else if (r_xram_rsp_fsm.read() == XRAM_RSP_I NVAL_LOCK)6839 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 6996 6840 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 6997 6841 … … 7003 6847 ////////////////////////// 7004 6848 case ALLOC_IVT_CONFIG: // allocated to CONFIG FSM 7005 if (r_config_fsm.read() != CONFIG_ DIR_IVT_LOCK)6849 if (r_config_fsm.read() != CONFIG_IVT_LOCK) 7006 6850 { 7007 6851 if (r_write_fsm.read() == WRITE_BC_IVT_LOCK) 7008 6852 r_alloc_ivt_fsm = ALLOC_IVT_WRITE; 7009 6853 7010 else if (r_xram_rsp_fsm.read() == XRAM_RSP_I NVAL_LOCK)6854 else if (r_xram_rsp_fsm.read() == XRAM_RSP_IVT_LOCK) 7011 6855 r_alloc_ivt_fsm = ALLOC_IVT_XRAM_RSP; 7012 6856 … … 7030 6874 ///////////////////////////////////////////////////////////////////////////////////// 7031 6875 6876 //std::cout << std::endl << "alloc_dir_fsm" << std::endl; 6877 7032 6878 switch(r_alloc_dir_fsm.read()) 7033 6879 { … … 7049 6895 if ( (r_config_fsm.read() != CONFIG_DIR_REQ) and 7050 6896 (r_config_fsm.read() != CONFIG_DIR_ACCESS) and 7051 (r_config_fsm.read() != CONFIG_DIR_IVT_LOCK) ) 6897 (r_config_fsm.read() != CONFIG_TRT_LOCK) and 6898 (r_config_fsm.read() != CONFIG_TRT_SET) and 6899 (r_config_fsm.read() != CONFIG_IVT_LOCK) ) 7052 6900 { 
7053 6901 if(r_read_fsm.read() == READ_DIR_REQ) … … 7099 6947 if(((r_write_fsm.read() != WRITE_DIR_REQ) and 7100 6948 (r_write_fsm.read() != WRITE_DIR_LOCK) and 7101 (r_write_fsm.read() != WRITE_ DIR_READ) and6949 (r_write_fsm.read() != WRITE_BC_DIR_READ) and 7102 6950 (r_write_fsm.read() != WRITE_DIR_HIT) and 7103 6951 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and … … 7194 7042 if( (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) and 7195 7043 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7196 (r_xram_rsp_fsm.read() != XRAM_RSP_I NVAL_LOCK))7044 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7197 7045 { 7198 7046 if(r_config_fsm.read() == CONFIG_DIR_REQ) … … 7219 7067 //////////////////////////////////////////////////////////////////////////////////// 7220 7068 // The ALLOC_TRT fsm allocates the access to the Transaction Table (write buffer) 7221 // with a round robin priority between 4user FSMs :7222 // The cyclic priority is READ > WRITE > CAS > XRAM_RSP7069 // with a round robin priority between 7 user FSMs : 7070 // The priority is READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG 7223 7071 // The ressource is always allocated. 7224 7072 /////////////////////////////////////////////////////////////////////////////////// 7225 7073 7074 //std::cout << std::endl << "alloc_trt_fsm" << std::endl; 7075 7226 7076 switch(r_alloc_trt_fsm.read()) 7227 7077 { 7228 //////////////////// 7229 case ALLOC_TRT_READ: 7230 if(r_read_fsm.read() != READ_TRT_LOCK) 7231 { 7232 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7233 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7234 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7235 7236 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7237 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7238 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7239 7240 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7241 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7242 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7243 7244 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7245 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7246 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7247 } 7248 break; 7249 7250 ///////////////////// 7251 case ALLOC_TRT_WRITE: 7252 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7253 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7254 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7255 { 7256 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7257 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7258 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7259 7260 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7261 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7262 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7263 7264 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7265 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7266 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7267 7268 else if(r_read_fsm.read() == READ_TRT_LOCK) 7269 r_alloc_trt_fsm = ALLOC_TRT_READ; 7270 } 7271 break; 7272 7273 //////////////////// 7274 case ALLOC_TRT_CAS: 7275 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7276 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7277 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7278 { 7279 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7280 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7281 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7282 7283 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7284 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7285 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7286 7287 else if(r_read_fsm.read() == READ_TRT_LOCK) 7288 r_alloc_trt_fsm = ALLOC_TRT_READ; 7289 7290 else if((r_write_fsm.read() == 
WRITE_MISS_TRT_LOCK) or 7291 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7292 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7293 } 7294 break; 7295 7296 //////////////////////// 7297 case ALLOC_TRT_XRAM_RSP: 7298 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 7299 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 7300 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7301 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 7302 (r_xram_rsp_fsm.read() != XRAM_RSP_INVAL_LOCK)) 7303 { 7304 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7305 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7306 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7307 7308 else if(r_read_fsm.read() == READ_TRT_LOCK) 7309 r_alloc_trt_fsm = ALLOC_TRT_READ; 7310 7311 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7312 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7313 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7314 7315 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7316 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7317 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7318 } 7319 break; 7320 7321 //////////////////////// 7322 case ALLOC_TRT_IXR_RSP: 7323 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 7324 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 7325 { 7326 if(r_read_fsm.read() == READ_TRT_LOCK) 7327 r_alloc_trt_fsm = ALLOC_TRT_READ; 7328 7329 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7330 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7331 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7332 7333 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7334 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7335 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7336 7337 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7338 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7339 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7340 } 7341 break; 7078 //////////////////// 7079 case ALLOC_TRT_READ: 7080 if(r_read_fsm.read() != READ_TRT_LOCK) 7081 { 7082 if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7083 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7084 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7085 7086 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7087 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7088 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7089 7090 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7091 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7092 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7093 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7094 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7095 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7096 7097 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7098 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7099 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7100 7101 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7102 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7103 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7104 7105 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7106 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7107 } 7108 break; 7109 7110 ///////////////////// 7111 case ALLOC_TRT_WRITE: 7112 if((r_write_fsm.read() != WRITE_MISS_TRT_LOCK) and 7113 (r_write_fsm.read() != WRITE_BC_TRT_LOCK) and 7114 (r_write_fsm.read() != WRITE_BC_IVT_LOCK)) 7115 { 7116 if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7117 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7118 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7119 7120 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7121 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7122 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7123 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7124 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7125 r_alloc_trt_fsm = 
ALLOC_TRT_IXR_CMD; 7126 7127 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7128 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7129 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7130 7131 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7132 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7133 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7134 7135 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7136 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7137 7138 else if(r_read_fsm.read() == READ_TRT_LOCK) 7139 r_alloc_trt_fsm = ALLOC_TRT_READ; 7140 } 7141 break; 7142 7143 /////////////////// 7144 case ALLOC_TRT_CAS: 7145 if((r_cas_fsm.read() != CAS_MISS_TRT_LOCK) and 7146 (r_cas_fsm.read() != CAS_BC_TRT_LOCK) and 7147 (r_cas_fsm.read() != CAS_BC_IVT_LOCK)) 7148 { 7149 if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7150 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7151 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7152 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7153 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7154 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7155 7156 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7157 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7158 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7159 7160 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7161 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7162 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7163 7164 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7165 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7166 7167 else if(r_read_fsm.read() == READ_TRT_LOCK) 7168 r_alloc_trt_fsm = ALLOC_TRT_READ; 7169 7170 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7171 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7172 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7173 } 7174 break; 7175 7176 /////////////////////// 7177 case ALLOC_TRT_IXR_CMD: 7178 if((r_ixr_cmd_fsm.read() != IXR_CMD_READ_TRT) and 7179 (r_ixr_cmd_fsm.read() != IXR_CMD_WRITE_TRT) and 7180 (r_ixr_cmd_fsm.read() != IXR_CMD_CAS_TRT) and 7181 (r_ixr_cmd_fsm.read() != IXR_CMD_XRAM_TRT) and 7182 (r_ixr_cmd_fsm.read() != IXR_CMD_CONFIG_TRT)) 7183 { 7184 if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7185 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7186 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7187 7188 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7189 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7190 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7191 7192 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7193 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7194 7195 else if(r_read_fsm.read() == READ_TRT_LOCK) 7196 r_alloc_trt_fsm = ALLOC_TRT_READ; 7197 7198 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7199 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7200 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7201 7202 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7203 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7204 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7205 } 7206 break; 7207 7208 //////////////////////// 7209 case ALLOC_TRT_XRAM_RSP: 7210 if(((r_xram_rsp_fsm.read() != XRAM_RSP_DIR_LOCK) or 7211 (r_alloc_dir_fsm.read() != ALLOC_DIR_XRAM_RSP)) and 7212 (r_xram_rsp_fsm.read() != XRAM_RSP_TRT_COPY) and 7213 (r_xram_rsp_fsm.read() != XRAM_RSP_DIR_UPDT) and 7214 (r_xram_rsp_fsm.read() != XRAM_RSP_IVT_LOCK)) 7215 { 7216 if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7217 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7218 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7219 7220 else if( r_config_fsm.read() == CONFIG_TRT_LOCK ) 7221 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7222 7223 else if(r_read_fsm.read() == READ_TRT_LOCK) 7224 r_alloc_trt_fsm 
= ALLOC_TRT_READ; 7225 7226 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7227 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7228 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7229 7230 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7231 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7232 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7233 7234 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7235 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7236 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7237 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7238 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7239 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7240 7241 } 7242 break; 7243 7244 /////////////////////// 7245 case ALLOC_TRT_IXR_RSP: 7246 if((r_ixr_rsp_fsm.read() != IXR_RSP_TRT_ERASE) and 7247 (r_ixr_rsp_fsm.read() != IXR_RSP_TRT_READ)) 7248 { 7249 if(r_config_fsm.read() == CONFIG_TRT_LOCK) 7250 r_alloc_trt_fsm = ALLOC_TRT_CONFIG; 7251 7252 else if(r_read_fsm.read() == READ_TRT_LOCK) 7253 r_alloc_trt_fsm = ALLOC_TRT_READ; 7254 7255 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7256 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7257 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7258 7259 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7260 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7261 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7262 7263 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7264 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7265 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7266 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7267 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7268 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7269 7270 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7271 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7272 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7273 } 7274 break; 7275 7276 ////////////////////// 7277 case ALLOC_TRT_CONFIG: 7278 if((r_config_fsm.read() != CONFIG_TRT_LOCK) and 7279 (r_config_fsm.read() != CONFIG_TRT_SET)) 7280 { 7281 if(r_read_fsm.read() == READ_TRT_LOCK) 7282 r_alloc_trt_fsm = ALLOC_TRT_READ; 7283 7284 else if((r_write_fsm.read() == WRITE_MISS_TRT_LOCK) or 7285 (r_write_fsm.read() == WRITE_BC_TRT_LOCK)) 7286 r_alloc_trt_fsm = ALLOC_TRT_WRITE; 7287 7288 else if((r_cas_fsm.read() == CAS_MISS_TRT_LOCK) or 7289 (r_cas_fsm.read() == CAS_BC_TRT_LOCK)) 7290 r_alloc_trt_fsm = ALLOC_TRT_CAS; 7291 7292 else if((r_ixr_cmd_fsm.read() == IXR_CMD_READ_TRT) or 7293 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_TRT) or 7294 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_TRT) or 7295 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_TRT) or 7296 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_TRT) ) 7297 r_alloc_trt_fsm = ALLOC_TRT_IXR_CMD; 7298 7299 else if((r_xram_rsp_fsm.read() == XRAM_RSP_DIR_LOCK) and 7300 (r_alloc_dir_fsm.read() == ALLOC_DIR_XRAM_RSP)) 7301 r_alloc_trt_fsm = ALLOC_TRT_XRAM_RSP; 7302 7303 else if((r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) or 7304 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) 7305 r_alloc_trt_fsm = ALLOC_TRT_IXR_RSP; 7306 } 7307 break; 7342 7308 7343 7309 } // end switch alloc_trt_fsm … … 7352 7318 ///////////////////////////////////////////////////////////////////////////////////// 7353 7319 7320 //std::cout << std::endl << "alloc_heap_fsm" << std::endl; 7321 7354 7322 switch(r_alloc_heap_fsm.read()) 7355 7323 { … … 7507 7475 7508 7476 } // end switch alloc_heap_fsm 7477 7478 //std::cout << std::endl << "fifo_update" << std::endl; 7509 7479 7510 7480 ///////////////////////////////////////////////////////////////////// … … 7579 7549 
//////////////////////////////////////////////////////////////////////////////////// 7580 7550 7581 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 7551 m_write_to_cc_send_inst_fifo.update( write_to_cc_send_fifo_get, 7552 write_to_cc_send_fifo_put, 7582 7553 write_to_cc_send_fifo_inst ); 7583 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put, 7554 m_write_to_cc_send_srcid_fifo.update( write_to_cc_send_fifo_get, 7555 write_to_cc_send_fifo_put, 7584 7556 write_to_cc_send_fifo_srcid ); 7585 #if L1_MULTI_CACHE7586 m_write_to_cc_send_cache_id_fifo.update( write_to_cc_send_fifo_get, write_to_cc_send_fifo_put,7587 write_to_cc_send_fifo_cache_id );7588 #endif7589 7557 7590 7558 //////////////////////////////////////////////////////////////////////////////////// … … 7592 7560 //////////////////////////////////////////////////////////////////////////////////// 7593 7561 7594 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 7562 m_config_to_cc_send_inst_fifo.update( config_to_cc_send_fifo_get, 7563 config_to_cc_send_fifo_put, 7595 7564 config_to_cc_send_fifo_inst ); 7596 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put, 7565 m_config_to_cc_send_srcid_fifo.update( config_to_cc_send_fifo_get, 7566 config_to_cc_send_fifo_put, 7597 7567 config_to_cc_send_fifo_srcid ); 7598 #if L1_MULTI_CACHE7599 m_config_to_cc_send_cache_id_fifo.update( config_to_cc_send_fifo_get, config_to_cc_send_fifo_put,7600 config_to_cc_send_fifo_cache_id );7601 #endif7602 7568 7603 7569 //////////////////////////////////////////////////////////////////////////////////// … … 7605 7571 //////////////////////////////////////////////////////////////////////////////////// 7606 7572 7607 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 7573 m_xram_rsp_to_cc_send_inst_fifo.update( xram_rsp_to_cc_send_fifo_get, 7574 xram_rsp_to_cc_send_fifo_put, 7608 7575 xram_rsp_to_cc_send_fifo_inst ); 7609 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put, 7576 m_xram_rsp_to_cc_send_srcid_fifo.update( xram_rsp_to_cc_send_fifo_get, 7577 xram_rsp_to_cc_send_fifo_put, 7610 7578 xram_rsp_to_cc_send_fifo_srcid ); 7611 #if L1_MULTI_CACHE7612 m_xram_rsp_to_cc_send_cache_id_fifo.update( xram_rsp_to_cc_send_fifo_get, xram_rsp_to_cc_send_fifo_put,7613 xram_rsp_to_cc_send_fifo_cache_id );7614 #endif7615 7579 7616 7580 //////////////////////////////////////////////////////////////////////////////////// … … 7618 7582 //////////////////////////////////////////////////////////////////////////////////// 7619 7583 7620 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 7584 m_cas_to_cc_send_inst_fifo.update( cas_to_cc_send_fifo_get, 7585 cas_to_cc_send_fifo_put, 7621 7586 cas_to_cc_send_fifo_inst ); 7622 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put, 7587 m_cas_to_cc_send_srcid_fifo.update( cas_to_cc_send_fifo_get, 7588 cas_to_cc_send_fifo_put, 7623 7589 cas_to_cc_send_fifo_srcid ); 7624 #if L1_MULTI_CACHE7625 m_cas_to_cc_send_cache_id_fifo.update( cas_to_cc_send_fifo_get, cas_to_cc_send_fifo_put,7626 cas_to_cc_send_fifo_cache_id );7627 #endif7628 7629 7590 m_cpt_cycles++; 7630 7591 … … 7639 7600 //////////////////////////////////////////////////////////// 7640 7601 7641 p_vci_ixr.be = 0xFF; // nor transmited to external ram 
7642 p_vci_ixr.pktid = 0; 7602 // DATA width is 8 bytes 7603 // The following values are not transmitted to XRAM 7604 // p_vci_ixr.be 7605 // p_vci_ixr.pktid 7606 // p_vci_ixr.cons 7607 // p_vci_ixr.wrap 7608 // p_vci_ixr.contig 7609 // p_vci_ixr.clen 7610 // p_vci_ixr.cfixed 7611 7612 p_vci_ixr.plen = 64; 7643 7613 p_vci_ixr.srcid = m_srcid_x; 7644 p_vci_ixr. cons = false;7645 p_vci_ixr. wrap = false;7646 p_vci_ixr.contig = true; 7647 p_vci_ixr.clen = 0;7648 p_vci_ixr.cfixed = false;7649 p_vci_ixr.plen = 64;7650 7651 if(r_ixr_cmd_fsm.read() == IXR_CMD_READ)7614 p_vci_ixr.trdid = r_ixr_cmd_trdid.read(); 7615 p_vci_ixr.address = (addr_t)r_ixr_cmd_address.read() + (r_ixr_cmd_word.read()<<2); 7616 7617 if ( (r_ixr_cmd_fsm.read() == IXR_CMD_READ_SEND) or 7618 (r_ixr_cmd_fsm.read() == IXR_CMD_WRITE_SEND) or 7619 (r_ixr_cmd_fsm.read() == IXR_CMD_CAS_SEND) or 7620 (r_ixr_cmd_fsm.read() == IXR_CMD_XRAM_SEND) or 7621 (r_ixr_cmd_fsm.read() == IXR_CMD_CONFIG_SEND) ) 7652 7622 { 7653 p_vci_ixr.cmd = vci_param_ext::CMD_READ;7654 p_vci_ixr.cmdval = true;7655 p_vci_ixr.address = (addr_t)(r_read_to_ixr_cmd_nline.read() * m_words * 4);7656 p_vci_ixr.wdata = 0;7657 p_vci_ixr.trdid = r_read_to_ixr_cmd_trdid.read();7658 p_vci_ixr.eop = true;7659 }7660 else if(r_ixr_cmd_fsm.read() == IXR_CMD_CAS)7661 {7662 if(r_cas_to_ixr_cmd_write.read())7663 {7664 size_t word = r_ixr_cmd_cpt.read();7665 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE;7666 7623 p_vci_ixr.cmdval = true; 7667 p_vci_ixr.address = (addr_t)( (r_cas_to_ixr_cmd_nline.read() * m_words + word) * 4 ); 7668 p_vci_ixr.wdata = ((wide_data_t)(r_cas_to_ixr_cmd_data[word].read())) | 7669 ((wide_data_t)(r_cas_to_ixr_cmd_data[word+1].read()) << 32); 7670 p_vci_ixr.trdid = r_cas_to_ixr_cmd_trdid.read(); 7671 p_vci_ixr.eop = (r_ixr_cmd_cpt == (m_words-2)); 7672 } 7673 else 7674 { 7675 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 7676 p_vci_ixr.cmdval = true; 7677 p_vci_ixr.address = (addr_t)(r_cas_to_ixr_cmd_nline.read() *m_words*4); 7678 p_vci_ixr.wdata = 0; 7679 p_vci_ixr.trdid = r_cas_to_ixr_cmd_trdid.read(); 7680 p_vci_ixr.eop = true; 7681 } 7682 } 7683 else if(r_ixr_cmd_fsm.read() == IXR_CMD_WRITE) 7684 { 7685 if(r_write_to_ixr_cmd_write.read()) 7686 { 7687 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 7688 p_vci_ixr.cmdval = true; 7689 p_vci_ixr.address = (addr_t)( (r_write_to_ixr_cmd_nline.read() * m_words + 7690 r_ixr_cmd_cpt.read()) * 4 ); 7691 p_vci_ixr.wdata = ((wide_data_t)(r_write_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read()) | 7692 ((wide_data_t)(r_write_to_ixr_cmd_data[r_ixr_cmd_cpt.read()+1].read()) << 32)); 7693 p_vci_ixr.trdid = r_write_to_ixr_cmd_trdid.read(); 7694 p_vci_ixr.eop = (r_ixr_cmd_cpt == (m_words-2)); 7695 } 7696 else 7697 { 7698 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 7699 p_vci_ixr.cmdval = true; 7700 p_vci_ixr.address = (addr_t)(r_write_to_ixr_cmd_nline.read() *m_words*4); 7701 p_vci_ixr.wdata = 0; 7702 p_vci_ixr.trdid = r_write_to_ixr_cmd_trdid.read(); 7703 p_vci_ixr.eop = true; 7704 } 7705 } 7706 else if(r_ixr_cmd_fsm.read() == IXR_CMD_XRAM) 7707 { 7708 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 7709 p_vci_ixr.cmdval = true; 7710 p_vci_ixr.address = (addr_t)( (r_xram_rsp_to_ixr_cmd_nline.read() * m_words + 7711 r_ixr_cmd_cpt.read()) * 4 ); 7712 p_vci_ixr.wdata = ((wide_data_t)(r_xram_rsp_to_ixr_cmd_data[r_ixr_cmd_cpt.read()].read()) | 7713 ((wide_data_t)(r_xram_rsp_to_ixr_cmd_data[r_ixr_cmd_cpt.read()+1].read()) << 32)); 7714 p_vci_ixr.trdid = r_xram_rsp_to_ixr_cmd_trdid.read(); 7715 p_vci_ixr.eop = (r_ixr_cmd_cpt == (m_words-2)); 7624 7625 
if ( r_ixr_cmd_get.read() ) // GET 7626 { 7627 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 7628 p_vci_ixr.wdata = 0; 7629 p_vci_ixr.eop = true; 7630 } 7631 else // PUT 7632 { 7633 size_t word = r_ixr_cmd_word.read(); 7634 p_vci_ixr.cmd = vci_param_ext::CMD_WRITE; 7635 p_vci_ixr.wdata = ((wide_data_t)(r_ixr_cmd_wdata[word].read())) | 7636 ((wide_data_t)(r_ixr_cmd_wdata[word+1].read()) << 32); 7637 p_vci_ixr.eop = (word == (m_words-2)); 7638 } 7716 7639 } 7717 7640 else 7718 7641 { 7719 p_vci_ixr.cmdval = false; 7720 p_vci_ixr.cmd = vci_param_ext::CMD_READ; 7721 p_vci_ixr.address = 0; 7722 p_vci_ixr.wdata = 0; 7723 p_vci_ixr.trdid = 0; 7724 p_vci_ixr.eop = false; 7642 p_vci_ixr.cmdval = false; 7725 7643 } 7726 7644 … … 7729 7647 //////////////////////////////////////////////////// 7730 7648 7731 if(((r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP) and 7732 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ)) or 7733 (r_ixr_rsp_fsm.read() == IXR_RSP_ACK)) 7734 7735 p_vci_ixr.rspack = true; 7736 7737 else 7738 p_vci_ixr.rspack = false; 7649 if( (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_READ) or 7650 (r_ixr_rsp_fsm.read() == IXR_RSP_TRT_ERASE) ) 7651 { 7652 p_vci_ixr.rspack = (r_alloc_trt_fsm.read() == ALLOC_TRT_IXR_RSP); 7653 } 7654 else // r_ixr_rsp_fsm == IXR_RSP_IDLE 7655 { 7656 p_vci_ixr.rspack = false; 7657 } 7739 7658 7740 7659 //////////////////////////////////////////////////// … … 7802 7721 break; 7803 7722 } 7804 7805 7723 case TGT_RSP_TGT_CMD: 7806 7724 { 7807 7725 p_vci_tgt.rspval = true; 7808 p_vci_tgt.rdata = 0;7726 p_vci_tgt.rdata = r_tgt_cmd_to_tgt_rsp_rdata.read(); 7809 7727 p_vci_tgt.rsrcid = r_tgt_cmd_to_tgt_rsp_srcid.read(); 7810 7728 p_vci_tgt.rtrdid = r_tgt_cmd_to_tgt_rsp_trdid.read(); … … 7815 7733 break; 7816 7734 } 7817 7818 7735 case TGT_RSP_READ: 7819 7736 { … … 8310 8227 //////////////////////////////////////////////////////////////////// 8311 8228 8312 switch(r_cleanup_fsm.read())8229 if ( r_cleanup_fsm.read() == CLEANUP_SEND_CLACK ) 8313 8230 { 8314 case CLEANUP_IDLE: 8315 case CLEANUP_GET_NLINE: 8316 case CLEANUP_DIR_REQ: 8317 case CLEANUP_DIR_LOCK: 8318 case CLEANUP_DIR_WRITE: 8319 case CLEANUP_HEAP_REQ: 8320 case CLEANUP_HEAP_LOCK: 8321 case CLEANUP_HEAP_SEARCH: 8322 case CLEANUP_HEAP_CLEAN: 8323 case CLEANUP_HEAP_FREE: 8324 case CLEANUP_IVT_LOCK: 8325 case CLEANUP_IVT_DECREMENT: 8326 case CLEANUP_IVT_CLEAR: 8327 case CLEANUP_WRITE_RSP: 8328 case CLEANUP_CONFIG_ACK: 8231 uint8_t cleanup_ack_type; 8232 if(r_cleanup_inst.read()) 8233 { 8234 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST; 8235 } 8236 else 8237 { 8238 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA; 8239 } 8240 8241 uint64_t flit = 0; 8242 uint64_t dest = r_cleanup_srcid.read() << 8243 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S); 8244 8245 DspinDhccpParam::dspin_set( 8246 flit, 8247 dest, 8248 DspinDhccpParam::CLACK_DEST); 8249 8250 DspinDhccpParam::dspin_set( 8251 flit, 8252 r_cleanup_nline.read() & 0xFFFF, 8253 DspinDhccpParam::CLACK_SET); 8254 8255 DspinDhccpParam::dspin_set( 8256 flit, 8257 r_cleanup_way_index.read(), 8258 DspinDhccpParam::CLACK_WAY); 8259 8260 DspinDhccpParam::dspin_set( 8261 flit, 8262 cleanup_ack_type, 8263 DspinDhccpParam::CLACK_TYPE); 8264 8265 p_dspin_clack.eop = true; 8266 p_dspin_clack.write = true; 8267 p_dspin_clack.data = flit; 8268 } 8269 else 8270 { 8329 8271 p_dspin_clack.write = false; 8330 8272 p_dspin_clack.eop = false; 8331 8273 p_dspin_clack.data = 0; 8332 8333 break;8334 8335 case CLEANUP_SEND_CLACK:8336 {8337 uint8_t cleanup_ack_type;8338 
if(r_cleanup_inst.read())8339 {8340 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_INST;8341 }8342 else8343 {8344 cleanup_ack_type = DspinDhccpParam::TYPE_CLACK_DATA;8345 }8346 8347 uint64_t flit = 0;8348 uint64_t dest =8349 r_cleanup_srcid.read() <<8350 (DspinDhccpParam::SRCID_WIDTH - vci_param_int::S);8351 8352 DspinDhccpParam::dspin_set(8353 flit,8354 dest,8355 DspinDhccpParam::CLACK_DEST);8356 8357 DspinDhccpParam::dspin_set(8358 flit,8359 r_cleanup_nline.read() & 0xFFFF,8360 DspinDhccpParam::CLACK_SET);8361 8362 DspinDhccpParam::dspin_set(8363 flit,8364 r_cleanup_way_index.read(),8365 DspinDhccpParam::CLACK_WAY);8366 8367 DspinDhccpParam::dspin_set(8368 flit,8369 cleanup_ack_type,8370 DspinDhccpParam::CLACK_TYPE);8371 8372 p_dspin_clack.eop = true;8373 p_dspin_clack.write = true;8374 p_dspin_clack.data = flit;8375 }8376 break;8377 8274 } 8378 8275 -
trunk/modules/vci_mem_cache/include/soclib/mem_cache.h
r434 r489
31 31 {
32 32 MEMC_LOCK,
33    MEMC_CMD_TYPE,
34 33 MEMC_ADDR_LO,
35 34 MEMC_ADDR_HI,
36    MEMC_BUF_LENGTH
   35 MEMC_BUF_LENGTH,
   36 MEMC_CMD_TYPE
37 37 };
38 38
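This last hunk reorders the configuration register map of the memory controller: MEMC_CMD_TYPE moves from offset 1 to the end of the enum, so MEMC_ADDR_LO, MEMC_ADDR_HI and MEMC_BUF_LENGTH each shift down by one position. Software that hard-codes the old offsets would now address the wrong register and must be rebuilt against the r489 header. The sketch below shows the resulting indices; only the enumerator names and their new order come from the changeset, while the 4-byte register stride and the offset helper are illustrative assumptions.

    // Illustrative sketch only: the enumerators and their order follow the
    // new mem_cache.h (r489); the 4-byte stride and the helper function are
    // assumptions made for this example, not part of the changeset.
    #include <cstdint>

    enum MemcConfigReg   // ordering introduced by r489
    {
        MEMC_LOCK,       // 0 (was 0)
        MEMC_ADDR_LO,    // 1 (was 2)
        MEMC_ADDR_HI,    // 2 (was 3)
        MEMC_BUF_LENGTH, // 3 (was 4)
        MEMC_CMD_TYPE    // 4 (was 1)
    };

    // Hypothetical helper: byte offset of a configuration register,
    // assuming one 32-bit word per register.
    constexpr uint32_t memc_config_offset(MemcConfigReg reg)
    {
        return static_cast<uint32_t>(reg) * 4;
    }

    static_assert(memc_config_offset(MEMC_CMD_TYPE) == 16,
                  "MEMC_CMD_TYPE is now the last register of the map");

Placing MEMC_CMD_TYPE last is consistent with a driver that programs the address, then the buffer length, and writes the command type last; the exact protocol, however, is defined by the CONFIG FSM in the controller, not by this header.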
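As a closing note on the first file of the changeset: the header comment of the ALLOC_TRT FSM now announces a round-robin priority over seven user FSMs (READ > WRITE > CAS > IXR_CMD > XRAM_RSP > IXR_RSP > CONFIG), and the new ALLOC_TRT_IXR_CMD and ALLOC_TRT_CONFIG states implement the two extra requesters. The stand-alone sketch below illustrates that arbitration pattern in plain C++; the class, the request array and the busy flag are hypothetical, and only the rotating-priority policy mirrors what the FSM does.

    // Illustrative sketch (not the SystemC code of the changeset): a
    // rotating-priority arbiter over the seven TRT requesters. The current
    // owner keeps the lock while it is busy; when it releases, the next
    // requesting FSM is searched in cyclic order starting after the owner.
    #include <array>
    #include <cstddef>

    enum TrtUser { READ, WRITE, CAS, IXR_CMD, XRAM_RSP, IXR_RSP, CONFIG, NB_USERS };

    struct TrtArbiter
    {
        TrtUser owner = READ;

        // 'requesting[u]' is true when user FSM 'u' asks for the TRT lock;
        // 'busy' is true while the present owner is still in its critical
        // section and must keep the allocation.
        TrtUser step(const std::array<bool, NB_USERS>& requesting, bool busy)
        {
            if (busy) return owner;                    // owner keeps the lock
            for (std::size_t i = 1; i <= NB_USERS; ++i) // scan in cyclic order
            {
                TrtUser next = static_cast<TrtUser>((owner + i) % NB_USERS);
                if (requesting[next]) { owner = next; break; }
            }
            return owner;                              // unchanged if nobody requests
        }
    };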