#ifdef SYSTEMC
/*
 * $Id: Load_store_unit_function_speculative_load_commit_transition.cpp 88 2008-12-10 18:31:39Z rosiere $
 *
 * [ Description ]
 *
 * Clocked transition process of the Load/Store unit with speculative loads
 * and in-order store commit.
 *
 * NOTE(review): this file appears to have been damaged by a tag-stripping
 * tool — several spans of the form "<...>" (shift operators, template
 * arguments, a loop header) are missing.  Every surviving token is kept
 * byte-identical below; each damaged site is marked with a NOTE(review)
 * comment.  Recover the lost text from version control — do not guess it.
 */

#include "Behavioural/Core/Multi_Execute_loop/Execute_loop/Multi_Execute_unit/Execute_unit/Load_store_unit/include/Load_store_unit.h"

namespace morpheo {
namespace behavioural {
namespace core {
namespace multi_execute_loop {
namespace execute_loop {
namespace multi_execute_unit {
namespace execute_unit {
namespace load_store_unit {

#undef  FUNCTION
#define FUNCTION "Load_store_unit::function_speculative_load_commit_transition"

  // Transition (state-update) function, evaluated once per clock cycle.
  //
  // On reset : empties the store queue, the load queue and the speculative
  // access queue, and clears both internal pointers/registers.
  //
  // Otherwise, in order :
  //   1. PORT_CHECK  : round-robin check of pending loads against older
  //                    stores (store-to-load dependency / bypass detection).
  //   2. MEMORY_IN   : accept a new operation — stores go to the store
  //                    queue, every other memory operation goes to the
  //                    speculative access queue.
  //   3. MEMORY_OUT  : retire the entry selected by the output arbiter
  //                    (store queue, load queue, or speculative load).
  //   4. DCACHE_REQ  : account for a request sent to the data cache, and
  //                    move a speculative-access-queue entry into the load
  //                    queue when needed.
  //   5. DCACHE_RSP  : consume a data-cache response (loads only; a store
  //                    response is an error).
  //   6. Advance the round-robin priority register used by step 1.
  void Load_store_unit::function_speculative_load_commit_transition (void)
  {
    log_printf(FUNC,Load_store_unit,FUNCTION,"Begin");

    if (PORT_READ(in_NRESET) == 0)
      {
        // Reset : clear all queue
        _speculative_access_queue_control->clear();

        reg_STORE_QUEUE_PTR_READ      = 0;
        reg_LOAD_QUEUE_CHECK_PRIORITY = 0;

        for (uint32_t i=0; i< _param->_size_store_queue ; i++)
          _store_queue [i]._state = STORE_QUEUE_EMPTY;

        for (uint32_t i=0; i< _param->_size_load_queue ; i++)
          _load_queue [i]._state = LOAD_QUEUE_EMPTY;

        for (uint32_t i=0; i< _param->_size_speculative_access_queue; i++)
          _speculative_access_queue [i]._state = SPECULATIVE_ACCESS_QUEUE_EMPTY;
      }
    else
      {
        //================================================================
        // Interface "PORT_CHECK"
        //================================================================
        // Several ways to perform the load/store dependency check:
        // 1) A load can check only one store per cycle.  In this case
        //    port_check <= size_load_queue.
        // 2) A load tries to check as many stores as possible per cycle.
        //    In this case it needs, instead of the write pointer, a bit
        //    vector indicating which stores have already been tested, plus
        //    a bit indicating that there is a match which is not
        //    necessarily the first one.

        // solution 1)
        log_printf(TRACE,Load_store_unit,FUNCTION,"CHECK");
        for (uint32_t i=0, nb_check=0; (nb_check<_param->_nb_port_check) and (i<_param->_size_load_queue); i++)
          {
            // Round-robin scan starting at the priority register.
            uint32_t index_load = (i + reg_LOAD_QUEUE_CHECK_PRIORITY)%_param->_size_load_queue;

            if (((_load_queue[index_load]._state == LOAD_QUEUE_WAIT_CHECK) or
                 (_load_queue[index_load]._state == LOAD_QUEUE_COMMIT_CHECK) or
                 (_load_queue[index_load]._state == LOAD_QUEUE_CHECK)) and
                is_operation_memory_load(_load_queue[index_load]._operation))
              {
                log_printf(TRACE,Load_store_unit,FUNCTION," * Find a load : %d",index_load);

                nb_check++; // use one port
                // found an entry that needs a check

                Tlsq_ptr_t index_store  = _load_queue[index_load]._store_queue_ptr_write;
                bool       end_check    = false;
                bool       change_state = false;
                bool       next         = false;

                // At the first store queue empty, stop check.
                // Explanation :
                //  * the rename logic keeps an empty slot in the store queue
                //    (also size_store_queue > 1)
                //  * when a store leaves the store queue, it was at the head
                //    of the re-order buffer, so no previous load can remain.
                log_printf(TRACE,Load_store_unit,FUNCTION," * index_store : %d",index_store);
                if (index_store == reg_STORE_QUEUE_PTR_READ)
                  {
                    // Reached the oldest store : nothing left to compare.
                    log_printf(TRACE,Load_store_unit,FUNCTION," * index_store == reg_STORE_QUEUE_PTR_READ");
                    end_check    = true;
                    change_state = true;
                  }
                else
                  {
                    log_printf(TRACE,Load_store_unit,FUNCTION," * index_store != reg_STORE_QUEUE_PTR_READ");
                    index_store = (index_store-1)%(_param->_size_store_queue); // store_queue_ptr_write targets the next slot to write, so that slot is not significant when the load is renamed
                    log_printf(TRACE,Load_store_unit,FUNCTION," * index_store : %d",index_store);

                    switch (_store_queue[index_store]._state)
                      {
                      case STORE_QUEUE_VALID_NO_SPECULATIVE :
                      case STORE_QUEUE_COMMIT :
                      case STORE_QUEUE_VALID_SPECULATIVE :
                        {
                          log_printf(TRACE,Load_store_unit,FUNCTION," * store have a valid entry");

                          // TODO : MMU - we assume here that addresses are physical.
                          bool test_thread_id = true;

                          // Test thread id — only the id ports that exist in
                          // this configuration take part in the comparison.
                          if (_param->_have_port_context_id)
                            test_thread_id &= (_load_queue[index_load]._context_id == _store_queue[index_store]._context_id);
                          if (_param->_have_port_front_end_id)
                            test_thread_id &= (_load_queue[index_load]._front_end_id == _store_queue[index_store]._front_end_id);
                          if (_param->_have_port_ooo_engine_id)
                            test_thread_id &= (_load_queue[index_load]._ooo_engine_id == _store_queue[index_store]._ooo_engine_id);

                          if (test_thread_id)
                            {
                              log_printf(TRACE,Load_store_unit,FUNCTION," * load and store is the same thread.");
                              // the load and store are in the same thread. Now, we must test address.
                              Tdcache_address_t load_addr  = _load_queue [index_load ]._address;
                              Tdcache_address_t store_addr = _store_queue[index_store]._address;

                              log_printf(TRACE,Load_store_unit,FUNCTION," * load_addr : %.8x.",load_addr );
                              log_printf(TRACE,Load_store_unit,FUNCTION," * store_addr : %.8x.",store_addr);
                              log_printf(TRACE,Load_store_unit,FUNCTION," * load_addr & mask_address_msb : %.8x.",load_addr & _param->_mask_address_msb);
                              log_printf(TRACE,Load_store_unit,FUNCTION," * store_addr & mask_address_msb : %.8x.",store_addr & _param->_mask_address_msb);

                              // Test if the both address target the same word
                              if ((load_addr & _param->_mask_address_msb) == (store_addr & _param->_mask_address_msb))
                                {
                                  log_printf(TRACE,Load_store_unit,FUNCTION," * address_msb is the same.");
                                  // all case - [] : store, () : load
                                  // (1) store_max >= load_max and store_min <= load_min  ...[...(...)...]...  Ok - inclusion in store
                                  // (2) store_min >  load_max                            ...[...]...(...)...  Ok - no conflict
                                  // (3) store_max <  load_min                            ...(...)...[...]...  Ok - no conflict
                                  // (4) store_max <  load_max and store_min >  load_min  ...(...[...]...)...  Ko - inclusion in load
                                  // (5) store_max >= load_max and store_min >  load_min  ...[...(...]...)...  Ko - conflict
                                  // (6) store_max <  load_max and store_min <= load_min  ...(...[...)...]...  Ko - conflict
                                  // but :
                                  //  - a load from the cache is a full word !
                                  //  - the mask can be made when the load is committed.
                                  //    Also, the rdata contains a full word.
                                  //  - the only case is (4)

                                  Tgeneral_data_t load_data  = _load_queue [index_load ]._rdata ;
                                  Tgeneral_data_t store_data = _store_queue[index_store]._wdata ;

                                  log_printf(TRACE,Load_store_unit,FUNCTION," * load_data (init) : %.8x",load_data);
                                  log_printf(TRACE,Load_store_unit,FUNCTION," * store_data (init) : %.8x",store_data);

                                  uint32_t store_num_byte_min = (store_addr & _param->_mask_address_lsb);

                                  // NOTE(review): the next statement is garbled — everything between
                                  // "(1<" and "(load_data, ..." was lost (presumably a "<<" shift of
                                  // the access size, the remaining byte-range bounds, the per-byte
                                  // check/merge loop header with the declarations of "mask" and
                                  // "index", and an "insert<...>" template argument, all stripped as
                                  // if they were markup tags).  The two stray closing braces after
                                  // the rdata_new trace close blocks opened in the lost text.
                                  // Recover from version control; tokens preserved as found:
                                  uint32_t store_num_byte_max = store_num_byte_min+(1<(load_data, store_data, index+8-1, index);
                                  _load_queue[index_load]._check_hit_byte |= mask;
                                  _load_queue[index_load]._check_hit = 1;
                                  change_state = true;
                                  log_printf(TRACE,Load_store_unit,FUNCTION," * rdata_new : %.8x", load_data);
                                  } }

                              _load_queue[index_load]._rdata = load_data;

                              log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit : %x",_load_queue[index_load]._check_hit);
                              log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : %x",_load_queue[index_load]._check_hit_byte);
                              log_printf(TRACE,Load_store_unit,FUNCTION," * mask_end_check : %x",(-1& _param->_mask_address_lsb));

                              // The check is finished when all bytes are flagged as hit.
                              end_check = (_load_queue[index_load]._check_hit_byte == MASK_CHECK_BYTE_HIT);
                            }
                          }

                          next = true;
                          break;
                        }
                      case STORE_QUEUE_EMPTY :
                      case STORE_QUEUE_NO_VALID_NO_SPECULATIVE :
                        {
                          // Store slot not yet usable for the comparison :
                          // wait (retry on a later cycle).
                          log_printf(TRACE,Load_store_unit,FUNCTION," * store have an invalid entry");
                          break;
                        }
                      }
                  }

                if (next)
                  {
                    log_printf(TRACE,Load_store_unit,FUNCTION," * next");
                    // if (_load_queue[index_load]._store_queue_ptr_write == 0)
                    //   _load_queue[index_load]._store_queue_ptr_write = _param->_size_store_queue-1;
                    // else
                    //   _load_queue[index_load]._store_queue_ptr_write --;
                    _load_queue[index_load]._store_queue_ptr_write = index_store; // because the index store has been decremented above

                    // FIXME : this comparison may not be necessary.  On the
                    // next cycle we would detect that the pointers are equal,
                    // which would avoid having two comparators against the
                    // register "reg_STORE_QUEUE_PTR_READ".
                    if (index_store == reg_STORE_QUEUE_PTR_READ)
                      {
                        end_check    = true;
                        change_state = true;
                      }
                  }

                if (change_state)
                  {
                    log_printf(TRACE,Load_store_unit,FUNCTION," * change_state");
                    switch (_load_queue[index_load]._state)
                      {
                      case LOAD_QUEUE_WAIT_CHECK :
                        _load_queue[index_load]._state = LOAD_QUEUE_WAIT ;
                        break;
                      case LOAD_QUEUE_COMMIT_CHECK :
                        {
                          if (end_check)
                            _load_queue[index_load]._state = LOAD_QUEUE_COMMIT;
                          else
                            _load_queue[index_load]._state = LOAD_QUEUE_CHECK;
                          break;
                        }
                      case LOAD_QUEUE_CHECK :
                        {
                          if (end_check)
                            _load_queue[index_load]._state = LOAD_QUEUE_COMMIT;

                          // the check found a bypass. A speculative load has already
                          // been committed : report a speculation miss.
                          if (_load_queue[index_load]._check_hit != 0)
                            {
                              _load_queue[index_load]._exception = EXCEPTION_MEMORY_MISS_SPECULATION;
                              _load_queue[index_load]._write_rd = 1; // write the good result
                            }
                          break;
                        }
                      default :
                        break;
                      }
                    log_printf(TRACE,Load_store_unit,FUNCTION," * new state : %d",_load_queue[index_load]._state);
                    log_printf(TRACE,Load_store_unit,FUNCTION," * exception : %d",_load_queue[index_load]._exception);
                  }
              }
            // else : don't use a port
          }

        //================================================================
        // Interface "MEMORY_IN"
        //================================================================
        if ((PORT_READ(in_MEMORY_IN_VAL [internal_MEMORY_IN_PORT]) == 1) and
            ( internal_MEMORY_IN_ACK == 1))
          {
            // Test operation :
            //~~~~~~~~~~~~~~~~~
            //  store  in store_queue
            //  load   in speculation_access_queue
            //  others in speculation_access_queue

#ifdef DEBUG_TEST
            if (PORT_READ(in_MEMORY_IN_TYPE [internal_MEMORY_IN_PORT]) != TYPE_MEMORY)
              throw ERRORMORPHEO(FUNCTION,"The type is different at 'TYPE_MEMORY'");
#endif
            Toperation_t    operation = PORT_READ(in_MEMORY_IN_OPERATION[internal_MEMORY_IN_PORT]);
            // Effective address = immediate + RA operand.
            Tgeneral_data_t address   = (PORT_READ(in_MEMORY_IN_IMMEDIAT[internal_MEMORY_IN_PORT]) +
                                         PORT_READ(in_MEMORY_IN_DATA_RA [internal_MEMORY_IN_PORT]));
            // Misaligned when the address has bits set inside the access mask.
            bool exception_alignement = (mask_memory_access(operation) & address) != 0;

            if (is_operation_memory_store(operation) == true)
              {
                // =======================
                // ===== STORE_QUEUE =====
                // =======================
                // There are two store request types :
                //  - first is an operation with address and data
                //  - second is the information from the re-order buffer : the
                //    store becomes non-speculative and can access the data cache
                log_printf(TRACE,Load_store_unit,FUNCTION,"store_queue");
                log_printf(TRACE,Load_store_unit,FUNCTION," * PUSH");

                // Write pointer is defined in the rename stage :
                Tlsq_ptr_t index = PORT_READ(in_MEMORY_IN_STORE_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]);
                log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index);

                // Need read : state and exception.
                Tstore_queue_state_t old_state = _store_queue [index]._state;
                Tstore_queue_state_t new_state = old_state;
                bool update_info = false;
                Texception_t old_exception = _store_queue [index]._exception;
                Texception_t new_exception = old_exception;

                // Compute next state
                switch (old_state)
                  {
                  case STORE_QUEUE_EMPTY :
                    {
                      if (is_operation_memory_store_head(operation) == true)
                        {
                          // ROB notification arrived first.
                          new_state = STORE_QUEUE_NO_VALID_NO_SPECULATIVE;
                          // test if it is a speculation miss
                          if (operation == OPERATION_MEMORY_STORE_HEAD_KO)
                            new_exception = EXCEPTION_MEMORY_MISS_SPECULATION;
                          else
                            new_exception = EXCEPTION_MEMORY_NONE;
                        }
                      else
                        {
                          // Address/data arrived first.
                          new_state = STORE_QUEUE_VALID_SPECULATIVE;
                          // Test if there is an exception
                          if (exception_alignement == true)
                            new_exception = EXCEPTION_MEMORY_ALIGNMENT;
                          else
                            new_exception = EXCEPTION_MEMORY_NONE;
                          update_info = true;
                        }
                      break;
                    }
                  case STORE_QUEUE_NO_VALID_NO_SPECULATIVE :
                    {
#ifdef DEBUG_TEST
                      if (is_operation_memory_store_head(operation) == true)
                        throw ErrorMorpheo(_("Transaction in memory_in's interface, actual state of store_queue is \"STORE_QUEUE_NO_VALID_NO_SPECULATIVE\", also a previous store_head have been receiveid. But this operation is a store_head."));
#endif
                      // Test if there is a new exception (priority : miss_speculation)
                      if ((exception_alignement == true) and (old_exception == EXCEPTION_MEMORY_NONE))
                        new_exception = EXCEPTION_MEMORY_ALIGNMENT;

                      if (new_exception != EXCEPTION_MEMORY_NONE)
                        new_state = STORE_QUEUE_COMMIT;
                      else
                        new_state = STORE_QUEUE_VALID_NO_SPECULATIVE;

                      update_info = true;
                      break;
                    }
                  case STORE_QUEUE_VALID_SPECULATIVE :
                    {
#ifdef DEBUG_TEST
                      if (is_operation_memory_store_head(operation) == false)
                        throw ErrorMorpheo(_("Transaction in memory_in's interface, actual state of store_queue is \"STORE_QUEUE_VALID_SPECULATIVE\", also a previous access with register and address have been receiveid. But this operation is a not store_head."));
#endif
                      if (operation == OPERATION_MEMORY_STORE_HEAD_KO)
                        new_exception = EXCEPTION_MEMORY_MISS_SPECULATION; // highest priority

                      if (new_exception != EXCEPTION_MEMORY_NONE)
                        new_state = STORE_QUEUE_COMMIT;
                      else
                        new_state = STORE_QUEUE_VALID_NO_SPECULATIVE;
                      break;
                    }
                  case STORE_QUEUE_VALID_NO_SPECULATIVE :
                  case STORE_QUEUE_COMMIT :
                    {
                      // A fully-formed entry must not receive another request.
                      throw ErrorMorpheo(" Invalid state and operation");
                    }
                  }

                _store_queue [index]._state     = new_state;
                _store_queue [index]._exception = new_exception;

                if (update_info == true)
                  {
                    log_printf(TRACE,Load_store_unit,FUNCTION," * Update information");
                    _store_queue [index]._context_id           = (not _param->_have_port_context_id   )?0:PORT_READ(in_MEMORY_IN_CONTEXT_ID   [internal_MEMORY_IN_PORT]);
                    _store_queue [index]._front_end_id         = (not _param->_have_port_front_end_id )?0:PORT_READ(in_MEMORY_IN_FRONT_END_ID [internal_MEMORY_IN_PORT]);
                    _store_queue [index]._ooo_engine_id        = (not _param->_have_port_ooo_engine_id)?0:PORT_READ(in_MEMORY_IN_OOO_ENGINE_ID[internal_MEMORY_IN_PORT]);
                    _store_queue [index]._packet_id            = (not _param->_have_port_rob_ptr      )?0:PORT_READ(in_MEMORY_IN_PACKET_ID    [internal_MEMORY_IN_PORT]);
                    _store_queue [index]._operation            = operation;
                    _store_queue [index]._load_queue_ptr_write = (not _param->_have_port_load_queue_ptr)?0:PORT_READ(in_MEMORY_IN_LOAD_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]);
                    _store_queue [index]._address              = address;
                    // reordering data
                    // NOTE(review): "duplicate(...)" probably lost a "<...>"
                    // template argument to the same tag-stripping — confirm
                    // against version control.
                    _store_queue [index]._wdata                = duplicate(_param->_size_general_data,PORT_READ(in_MEMORY_IN_DATA_RB[internal_MEMORY_IN_PORT]), memory_size(operation), 0);
                    // _store_queue [index]._num_reg_rd = PORT_READ(in_MEMORY_IN_NUM_REG_RD [internal_MEMORY_IN_PORT]);
                  }
              }
            else
              {
                // ====================================
                // ===== SPECULATIVE_ACCESS_QUEUE =====
                // ====================================
                // The speculative access queue receives many request types.
                log_printf(TRACE,Load_store_unit,FUNCTION,"speculative_access_queue");
                log_printf(TRACE,Load_store_unit,FUNCTION," * PUSH");

                // Write in reservation station
                uint32_t index = _speculative_access_queue_control->push();
                log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d", index);

                Texception_t exception;
                if (exception_alignement == true)
                  exception = EXCEPTION_MEMORY_ALIGNMENT;
                else
                  exception = EXCEPTION_MEMORY_NONE;

                // if there is an exception, don't access the cache
                // NOTE : type "other" (lock, invalidate, flush and sync) can't
                // raise an alignment exception (access is equivalent to 8 bits)
                _speculative_access_queue [index]._state = (exception == EXCEPTION_MEMORY_NONE)?SPECULATIVE_ACCESS_QUEUE_WAIT_CACHE:SPECULATIVE_ACCESS_QUEUE_WAIT_LOAD_QUEUE;
                _speculative_access_queue [index]._context_id            = (not _param->_have_port_context_id   )?0:PORT_READ(in_MEMORY_IN_CONTEXT_ID   [internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._front_end_id          = (not _param->_have_port_front_end_id )?0:PORT_READ(in_MEMORY_IN_FRONT_END_ID [internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._ooo_engine_id         = (not _param->_have_port_ooo_engine_id)?0:PORT_READ(in_MEMORY_IN_OOO_ENGINE_ID[internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._packet_id             = (not _param->_have_port_rob_ptr      )?0:PORT_READ(in_MEMORY_IN_PACKET_ID    [internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._operation             = operation;
                _speculative_access_queue [index]._load_queue_ptr_write  = (not _param->_have_port_load_queue_ptr)?0:PORT_READ(in_MEMORY_IN_LOAD_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._store_queue_ptr_write= PORT_READ(in_MEMORY_IN_STORE_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._address               = address;
                // NOTE : if the operation is a load, then there is a result
                // and it must be written into the register file.
                _speculative_access_queue [index]._write_rd              = is_operation_memory_load(operation);
                _speculative_access_queue [index]._num_reg_rd            = PORT_READ(in_MEMORY_IN_NUM_REG_RD [internal_MEMORY_IN_PORT]);
                _speculative_access_queue [index]._exception             = exception;

                log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index);
              }
          }

        //================================================================
        // Interface "MEMORY_OUT"
        //================================================================
        if (( internal_MEMORY_OUT_VAL == 1) and
            (PORT_READ(in_MEMORY_OUT_ACK[0]) == 1))
          {
            log_printf(TRACE,Load_store_unit,FUNCTION,"MEMORY_OUT transaction");
            switch (internal_MEMORY_OUT_SELECT_QUEUE)
              {
              case SELECT_STORE_QUEUE :
                {
                  // =======================
                  // ===== STORE_QUEUE =====
                  // =======================
                  log_printf(TRACE,Load_store_unit,FUNCTION," * store_queue [%d]",reg_STORE_QUEUE_PTR_READ);
                  // Entry flush and increase the read pointer
                  _store_queue [reg_STORE_QUEUE_PTR_READ]._state = STORE_QUEUE_EMPTY;
                  reg_STORE_QUEUE_PTR_READ = (reg_STORE_QUEUE_PTR_READ+1)%_param->_size_store_queue;
                  break;
                }
              case SELECT_LOAD_QUEUE :
                {
                  // ======================
                  // ===== LOAD_QUEUE =====
                  // ======================
                  log_printf(TRACE,Load_store_unit,FUNCTION," * load_queue [%d]",internal_MEMORY_OUT_PTR);
                  // Entry flush and increase the read pointer
                  _load_queue [internal_MEMORY_OUT_PTR]._state = LOAD_QUEUE_EMPTY;
                  // reg_LOAD_QUEUE_PTR_READ = (reg_LOAD_QUEUE_PTR_READ+1)%_param->_size_load_queue;
                  break;
                }
              case SELECT_LOAD_QUEUE_SPECULATIVE :
                {
                  log_printf(TRACE,Load_store_unit,FUNCTION," * load_queue [%d] (speculative)",internal_MEMORY_OUT_PTR);
                  _load_queue [internal_MEMORY_OUT_PTR]._state = LOAD_QUEUE_CHECK;
                  // NOTE : a speculative load writes into the register file.
                  // If the speculation is a miss, write_rd is re-set to 1.
                  _load_queue [internal_MEMORY_OUT_PTR]._write_rd = 0;
                  break;
                }
                break; // NOTE(review): dead break outside any case label — kept as in original
              }
          }

        //================================================================
        // Interface "DCACHE_REQ"
        //================================================================
        // An entry whose cache access was skipped (exception) goes straight
        // to the load queue this cycle.
        bool load_queue_push = (_speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._state == SPECULATIVE_ACCESS_QUEUE_WAIT_LOAD_QUEUE);

        if (( internal_DCACHE_REQ_VAL == 1) and
            (PORT_READ(in_DCACHE_REQ_ACK[0]) == 1))
          {
            log_printf(TRACE,Load_store_unit,FUNCTION,"DCACHE_REQ");
            switch (internal_DCACHE_REQ_SELECT_QUEUE)
              {
              case SELECT_STORE_QUEUE :
                {
                  // =======================
                  // ===== STORE_QUEUE =====
                  // =======================
                  // Entry flush and increase the read pointer
                  _store_queue [reg_STORE_QUEUE_PTR_READ]._state = STORE_QUEUE_COMMIT;
                  break;
                }
              case SELECT_LOAD_QUEUE_SPECULATIVE :
                {
                  // =========================================
                  // ===== SELECT_LOAD_QUEUE_SPECULATIVE =====
                  // =========================================
                  // Request sent to the cache : move the entry to the load queue.
                  load_queue_push = true;
                  break;
                }
              case SELECT_LOAD_QUEUE :
                {
                  // The load queue never drives DCACHE_REQ directly.
                  throw ErrorMorpheo(_("Invalid selection"));
                  break;
                }
                break; // NOTE(review): dead break outside any case label — kept as in original
              }
          }

        if (load_queue_push)
          {
            // Transfer the head of the speculative access queue into the
            // load queue slot reserved at rename time.
            Tlsq_ptr_t   ptr_write = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._load_queue_ptr_write;
            Toperation_t operation = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._operation;
            Texception_t exception = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._exception;
            bool         have_exception = (exception != EXCEPTION_MEMORY_NONE);

            if (have_exception)
              _load_queue [ptr_write]._state = LOAD_QUEUE_COMMIT;
            else
              {
                if (have_dcache_rsp(operation))
                  {
                    // load and synchronisation
                    if (must_check(operation))
                      {
                        // load
                        _load_queue [ptr_write]._state = LOAD_QUEUE_WAIT_CHECK;
                      }
                    else
                      {
                        // synchronisation
                        _load_queue [ptr_write]._state = LOAD_QUEUE_WAIT;
                      }
                  }
                else
                  {
                    // lock, prefecth, flush and invalidate
                    _load_queue [ptr_write]._state = LOAD_QUEUE_COMMIT;
                  }
              }

            Tdcache_address_t address        = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._address;
            Tdcache_address_t address_lsb    = (address & _param->_mask_address_lsb);
            // Bytes outside the accessed range are pre-marked as "hit" so the
            // dependency check only has to cover the accessed bytes.
            // NOTE(review): "gen_mask_not(...)" may have lost a "<...>"
            // template argument to the tag-stripping — confirm.
            Tdcache_address_t check_hit_byte = gen_mask_not(address_lsb+memory_access(operation)+1,address_lsb);

            _load_queue [ptr_write]._context_id            = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._context_id ;
            _load_queue [ptr_write]._front_end_id          = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._front_end_id ;
            _load_queue [ptr_write]._ooo_engine_id         = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._ooo_engine_id ;
            _load_queue [ptr_write]._packet_id             = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._packet_id ;
            _load_queue [ptr_write]._operation             = operation;
            _load_queue [ptr_write]._store_queue_ptr_write = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._store_queue_ptr_write;
            _load_queue [ptr_write]._address               = address;
            _load_queue [ptr_write]._check_hit_byte        = check_hit_byte;
            _load_queue [ptr_write]._check_hit             = 0;
            _load_queue [ptr_write]._shift                 = address<<3;
            _load_queue [ptr_write]._is_load_signed        = is_operation_memory_load_signed(operation);
            _load_queue [ptr_write]._access_size           = memory_size(operation);
            // NOTE : even with an exception, we must write into the register,
            // because a dependent instruction waits for the load data.
            _load_queue [ptr_write]._write_rd              = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._write_rd ;
            _load_queue [ptr_write]._num_reg_rd            = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._num_reg_rd ;
            _load_queue [ptr_write]._exception             = exception;
            _load_queue [ptr_write]._rdata                 = address; // for the exception

            log_printf(TRACE,Load_store_unit,FUNCTION," * speculative_access_queue");
            log_printf(TRACE,Load_store_unit,FUNCTION," * POP[%d]",(*_speculative_access_queue_control)[0]);

            _speculative_access_queue [(*_speculative_access_queue_control)[0]]._state = SPECULATIVE_ACCESS_QUEUE_EMPTY;

            _speculative_access_queue_control->pop();
          }

        //================================================================
        // Interface "DCACHE_RSP"
        //================================================================
        if ((PORT_READ(in_DCACHE_RSP_VAL[0])== 1) and
            ( internal_DCACHE_RSP_ACK == 1))
          {
            log_printf(TRACE,Load_store_unit,FUNCTION,"DCACHE_RSP");

            // don't use context_id : because there is one queue for all threads
            //Tcontext_t      context_id = PORT_READ(in_DCACHE_RSP_CONTEXT_ID[0]);
            Tpacket_t       packet_id = PORT_READ(in_DCACHE_RSP_PACKET_ID [0]);
            Tdcache_data_t  rdata     = PORT_READ(in_DCACHE_RSP_RDATA     [0]);
            Tdcache_error_t error     = PORT_READ(in_DCACHE_RSP_ERROR     [0]);

            log_printf(TRACE,Load_store_unit,FUNCTION," * original packet_id : %d", packet_id);

            if (DCACHE_RSP_IS_LOAD(packet_id) == 1)
              {
                // Low bit flags a load; the remaining bits index the load queue.
                packet_id >>= 1;
                log_printf(TRACE,Load_store_unit,FUNCTION," * packet is a LOAD : %d", packet_id);

#ifdef DEBUG_TEST
                if (not have_dcache_rsp(_load_queue [packet_id]._operation))
                  throw ErrorMorpheo(_("Receive of respons, but the corresponding operation don't wait a respons."));
#endif

                if (error != DCACHE_ERROR_NONE)
                  {
                    log_printf(TRACE,Load_store_unit,FUNCTION," * have a bus error !!!");
                    _load_queue [packet_id]._exception = EXCEPTION_MEMORY_BUS_ERROR;
                    _load_queue [packet_id]._state     = LOAD_QUEUE_COMMIT;
                  }
                else
                  {
                    log_printf(TRACE,Load_store_unit,FUNCTION," * have no bus error.");
                    log_printf(TRACE,Load_store_unit,FUNCTION," * previous state : %d.",_load_queue [packet_id]._state);

                    // FIXME : convention : on a bus error, the cache returns the
                    // faulty address !  But the load's address is aligned !
                    _load_queue [packet_id]._rdata = rdata;

                    switch (_load_queue [packet_id]._state)
                      {
                      case LOAD_QUEUE_WAIT_CHECK :
                        _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT_CHECK;
                        break;
                      case LOAD_QUEUE_WAIT :
                        _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT ;
                        break;
                      default :
                        throw ErrorMorpheo(_("Illegal state (dcache_rsp)."));
                        break;
                      }
                  }
              }
            else
              {
                log_printf(TRACE,Load_store_unit,FUNCTION," * packet is a STORE");
                // TODO : stores generate no response except on a bus error !!!
                throw ERRORMORPHEO(FUNCTION,_("dcache_rsp : no respons to a write. (TODO : manage bus error to the store operation.)"));
              }
          }

        // this register manages the priority of the check -> Round robin
        reg_LOAD_QUEUE_CHECK_PRIORITY = (reg_LOAD_QUEUE_CHECK_PRIORITY+1)%_param->_size_load_queue;

#if defined(DEBUG) and (DEBUG>=DEBUG_TRACE)
        // ***** dump store queue
        // NOTE(review): both "static_cast(...)" calls below are missing their
        // "<type>" template argument (stripped as a markup tag) — recover
        // from version control.
        std::cout << "Dump STORE_QUEUE :" << std::endl
                  << "ptr_read : " << toString(static_cast(reg_STORE_QUEUE_PTR_READ)) << std::endl;
        for (uint32_t i=0; i<_param->_size_store_queue; i++)
          {
            uint32_t j = (reg_STORE_QUEUE_PTR_READ+i)%_param->_size_store_queue;
            std::cout << "{" << j << "}" << std::endl << _store_queue[j] << std::endl;
          }
        // ***** dump speculative_access queue
        std::cout << "Dump SPECULATIVE_ACCESS_QUEUE :" << std::endl;
        for (uint32_t i=0; i<_param->_size_speculative_access_queue; i++)
          {
            uint32_t j = (*_speculative_access_queue_control)[i];
            std::cout << "{" << j << "}" << std::endl << _speculative_access_queue[j] << std::endl;
          }
        // ***** dump load queue
        std::cout << "Dump LOAD_QUEUE :" << std::endl
                  << "ptr_read_check_priority : " << toString(static_cast(reg_LOAD_QUEUE_CHECK_PRIORITY)) << std::endl;
        for (uint32_t i=0; i<_param->_size_load_queue; i++)
          {
            uint32_t j = i;
            std::cout << "{" << j << "}" << std::endl << _load_queue[j] << std::endl;
          }
#endif

#ifdef STATISTICS
        // Occupancy counters : one tick per non-empty slot per cycle.
        if (usage_is_set(_usage,USE_STATISTICS))
          {
            for (uint32_t i=0; i<_param->_size_store_queue; i++)
              if (_store_queue[i]._state != STORE_QUEUE_EMPTY)
                (*_stat_use_store_queue) ++;
            for (uint32_t i=0; i<_param->_size_speculative_access_queue; i++)
              if (_speculative_access_queue[i]._state != SPECULATIVE_ACCESS_QUEUE_EMPTY)
                (*_stat_use_speculative_access_queue) ++;
            for (uint32_t i=0; i<_param->_size_load_queue; i++)
              if (_load_queue[i]._state != LOAD_QUEUE_EMPTY)
                (*_stat_use_load_queue) ++;
          }
#endif
      }

    log_printf(FUNC,Load_store_unit,FUNCTION,"End");
  };

}; // end namespace load_store_unit
}; // end namespace execute_unit
}; // end namespace multi_execute_unit
}; // end namespace execute_loop
}; // end namespace multi_execute_loop
}; // end namespace core
}; // end namespace behavioural
}; // end namespace morpheo
#endif