[59] | 1 | #ifdef SYSTEMC |
---|
| 2 | /* |
---|
| 3 | * $Id$ |
---|
| 4 | * |
---|
| 5 | * [ Description ] |
---|
| 6 | * |
---|
| 7 | */ |
---|
| 8 | |
---|
| 9 | #include "Behavioural/Core/Multi_Execute_loop/Execute_loop/Multi_Execute_unit/Execute_unit/Load_store_unit/include/Load_store_unit.h" |
---|
| 10 | |
---|
| 11 | namespace morpheo { |
---|
| 12 | namespace behavioural { |
---|
| 13 | namespace core { |
---|
| 14 | namespace multi_execute_loop { |
---|
| 15 | namespace execute_loop { |
---|
| 16 | namespace multi_execute_unit { |
---|
| 17 | namespace execute_unit { |
---|
| 18 | namespace load_store_unit { |
---|
| 19 | |
---|
| 20 | |
---|
| 21 | #undef FUNCTION |
---|
| 22 | #define FUNCTION "Load_store_unit::function_speculative_load_commit_transition" |
---|
| 23 | void Load_store_unit::function_speculative_load_commit_transition (void) |
---|
| 24 | { |
---|
| 25 | log_printf(FUNC,Load_store_unit,FUNCTION,"Begin"); |
---|
| 26 | |
---|
| 27 | if (PORT_READ(in_NRESET) == 0) |
---|
| 28 | { |
---|
| 29 | // Reset : clear all queue |
---|
| 30 | _speculative_access_queue_control->clear(); |
---|
| 31 | |
---|
[71] | 32 | reg_STORE_QUEUE_PTR_READ = 0; |
---|
| 33 | reg_LOAD_QUEUE_CHECK_PRIORITY = 0; |
---|
| 34 | |
---|
[59] | 35 | for (uint32_t i=0; i< _param->_size_store_queue ; i++) |
---|
| 36 | _store_queue [i]._state = STORE_QUEUE_EMPTY; |
---|
| 37 | |
---|
| 38 | for (uint32_t i=0; i< _param->_size_load_queue ; i++) |
---|
| 39 | _load_queue [i]._state = LOAD_QUEUE_EMPTY; |
---|
| 40 | |
---|
| 41 | for (uint32_t i=0; i< _param->_size_speculative_access_queue; i++) |
---|
| 42 | _speculative_access_queue [i]._state = SPECULATIVE_ACCESS_QUEUE_EMPTY; |
---|
| 43 | } |
---|
| 44 | else |
---|
| 45 | { |
---|
| 46 | //================================================================ |
---|
[71] | 47 | // Interface "PORT_CHECK" |
---|
| 48 | //================================================================ |
---|
| 49 | |
---|
| 50 | // Plusieurs moyens de faire la verification de dépendance entre les loads et les stores. |
---|
| 51 | // 1) un load ne peut vérifier qu'un store par cycle. Dans ce cas port_check <= size_load_queue |
---|
| 52 | // 2) un load tente de vérifier le maximum de store par cycle. Dans ce cas ce n'est pas du pointeur d'écriture qu'il lui faut mais un vecteur de bit indiquant quel store à déjà été testé. De plus il faut un bit indiquant qu'il y a un match mais que ce n'est pas forcément le premier. |
---|
| 53 | |
---|
| 54 | // solution 1) |
---|
| 55 | log_printf(TRACE,Load_store_unit,FUNCTION,"CHECK"); |
---|
| 56 | for (uint32_t i=0, nb_check=0; (nb_check<_param->_nb_port_check) and (i<_param->_size_load_queue); i++) |
---|
| 57 | { |
---|
| 58 | uint32_t index_load = (i + reg_LOAD_QUEUE_CHECK_PRIORITY)%_param->_size_load_queue; |
---|
| 59 | |
---|
| 60 | if (((_load_queue[index_load]._state == LOAD_QUEUE_WAIT_CHECK) or |
---|
| 61 | (_load_queue[index_load]._state == LOAD_QUEUE_COMMIT_CHECK) or |
---|
| 62 | (_load_queue[index_load]._state == LOAD_QUEUE_CHECK)) and |
---|
| 63 | is_operation_memory_load(_load_queue[index_load]._operation)) |
---|
| 64 | { |
---|
| 65 | log_printf(TRACE,Load_store_unit,FUNCTION," * Find a load : %d",index_load); |
---|
| 66 | |
---|
| 67 | nb_check++; // use one port |
---|
| 68 | |
---|
| 69 | // find an entry that needs a check |
---|
| 70 | |
---|
| 71 | Tlsq_ptr_t index_store = _load_queue[index_load]._store_queue_ptr_write; |
---|
| 72 | bool end_check = false; |
---|
| 73 | bool change_state = false; |
---|
| 74 | bool next = false; |
---|
| 75 | |
---|
| 76 | // At the first store queue empty, stop check. |
---|
| 77 | // Explication : |
---|
| 78 | // * the rename logic keeps an empty slot in the store queue (hence size_store_queue > 1) |
---|
| 79 | // * when a store leaves the store queue, it was at the head of the re-order buffer; hence there are no previous loads. |
---|
| 80 | |
---|
| 81 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store : %d",index_store); |
---|
| 82 | if (index_store == reg_STORE_QUEUE_PTR_READ) |
---|
| 83 | { |
---|
| 84 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store == reg_STORE_QUEUE_PTR_READ"); |
---|
| 85 | end_check = true; |
---|
| 86 | change_state = true; |
---|
| 87 | } |
---|
| 88 | else |
---|
| 89 | { |
---|
| 90 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store != reg_STORE_QUEUE_PTR_READ"); |
---|
| 91 | |
---|
| 92 | index_store = (index_store-1)%(_param->_size_store_queue); // store_queue_ptr_write target the next slot to write, also the slot is not significatif when the load is renaming |
---|
| 93 | |
---|
| 94 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store : %d",index_store); |
---|
| 95 | |
---|
| 96 | switch (_store_queue[index_store]._state) |
---|
| 97 | { |
---|
| 98 | case STORE_QUEUE_VALID_NO_SPECULATIVE : |
---|
| 99 | case STORE_QUEUE_COMMIT : |
---|
| 100 | case STORE_QUEUE_VALID_SPECULATIVE : |
---|
| 101 | { |
---|
| 102 | |
---|
| 103 | log_printf(TRACE,Load_store_unit,FUNCTION," * store have a valid entry"); |
---|
| 104 | |
---|
| 105 | // TODO : MMU - nous considérons que les adresses sont physique |
---|
| 106 | bool test_thread_id = true; |
---|
| 107 | |
---|
| 108 | // Test thread id. |
---|
| 109 | if (_param->_have_port_context_id) |
---|
| 110 | test_thread_id &= (_load_queue[index_load]._context_id == _store_queue[index_store]._context_id); |
---|
| 111 | if (_param->_have_port_front_end_id) |
---|
| 112 | test_thread_id &= (_load_queue[index_load]._front_end_id == _store_queue[index_store]._front_end_id); |
---|
| 113 | if (_param->_have_port_ooo_engine_id) |
---|
| 114 | test_thread_id &= (_load_queue[index_load]._ooo_engine_id == _store_queue[index_store]._ooo_engine_id); |
---|
| 115 | |
---|
| 116 | if (test_thread_id) |
---|
| 117 | { |
---|
| 118 | log_printf(TRACE,Load_store_unit,FUNCTION," * load and store is the same thread."); |
---|
| 119 | // the load and store are in the same thread. Now, we must test address. |
---|
| 120 | Tdcache_address_t load_addr = _load_queue [index_load ]._address; |
---|
| 121 | Tdcache_address_t store_addr = _store_queue[index_store]._address; |
---|
| 122 | |
---|
| 123 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_addr : %.8x.",load_addr ); |
---|
| 124 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_addr : %.8x.",store_addr); |
---|
| 125 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_addr & mask_address_msb : %.8x.",load_addr & _param->_mask_address_msb); |
---|
| 126 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_addr & mask_address_msb : %.8x.",store_addr & _param->_mask_address_msb); |
---|
| 127 | // Test if the both address target the same word |
---|
| 128 | if ((load_addr & _param->_mask_address_msb) == |
---|
| 129 | (store_addr & _param->_mask_address_msb)) |
---|
| 130 | { |
---|
| 131 | log_printf(TRACE,Load_store_unit,FUNCTION," * address_msb is the same."); |
---|
| 132 | // all case - [] : store, () : load |
---|
| 133 | // (1) store_max >= load_max and store_min <= load_min ...[...(...)...]... Ok - inclusion in store |
---|
| 134 | // (2) store_min > load_max ...[...]...(...)... Ok - no conflit |
---|
| 135 | // (3) store_max < load_min ...(...)...[...]... Ok - no conflit |
---|
| 136 | // (4) store_max < load_max and store_min > load_min ...(...[...]...)... Ko - inclusion in load |
---|
| 137 | // (5) store_max >= load_max and store_min > load_min ...[...(...]...)... Ko - conflit |
---|
| 138 | // (6) store_max < load_max and store_min <= load_min ...(...[...)...]... Ko - conflit |
---|
| 139 | // but : |
---|
| 140 | // load in the cache is a word ! |
---|
| 141 | // the mask can be make when the load is commited. Also, the rdata content a full word. |
---|
| 142 | // the only case is (4) |
---|
| 143 | |
---|
| 144 | Tgeneral_data_t load_data = _load_queue [index_load ]._rdata ; |
---|
| 145 | Tgeneral_data_t store_data = _store_queue[index_store]._wdata ; |
---|
| 146 | |
---|
| 147 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_data (init) : %.8x",load_data); |
---|
| 148 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_data (init) : %.8x",store_data); |
---|
| 149 | uint32_t store_num_byte_min = (store_addr & _param->_mask_address_lsb); |
---|
| 150 | uint32_t store_num_byte_max = store_num_byte_min+(1<<memory_access(_store_queue[index_store]._operation)); |
---|
| 151 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_num_byte_min : %d",store_num_byte_min); |
---|
| 152 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_num_byte_max : %d",store_num_byte_max); |
---|
| 153 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit : %x",_load_queue[index_load]._check_hit); |
---|
| 154 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : %x",_load_queue[index_load]._check_hit_byte); |
---|
| 155 | // The bypass is checked byte per byte |
---|
| 156 | for (uint32_t byte=store_num_byte_min; byte<store_num_byte_max; byte ++) |
---|
| 157 | { |
---|
| 158 | uint32_t mask = 1<<byte; |
---|
| 159 | uint32_t index = byte<<3; |
---|
| 160 | log_printf(TRACE,Load_store_unit,FUNCTION," * byte : %d",byte); |
---|
| 161 | log_printf(TRACE,Load_store_unit,FUNCTION," * mask : %d",mask); |
---|
| 162 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index); |
---|
| 163 | // Accept the bypass only if there was no previous bypass from another store |
---|
| 164 | if ((_load_queue[index_load]._check_hit_byte&mask)==0) |
---|
| 165 | { |
---|
| 166 | log_printf(TRACE,Load_store_unit,FUNCTION," * bypass !!!"); |
---|
| 167 | log_printf(TRACE,Load_store_unit,FUNCTION," * rdata_old : %.8x", load_data); |
---|
| 168 | load_data = insert<Tdcache_data_t>(load_data, store_data, index+8-1, index); |
---|
| 169 | _load_queue[index_load]._check_hit_byte |= mask; |
---|
| 170 | _load_queue[index_load]._check_hit = 1; |
---|
| 171 | change_state = true; |
---|
| 172 | |
---|
| 173 | log_printf(TRACE,Load_store_unit,FUNCTION," * rdata_new : %.8x", load_data); |
---|
| 174 | } |
---|
| 175 | } |
---|
| 176 | |
---|
| 177 | _load_queue[index_load]._rdata = load_data; |
---|
| 178 | |
---|
| 179 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit : %x",_load_queue[index_load]._check_hit); |
---|
| 180 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : %x",_load_queue[index_load]._check_hit_byte); |
---|
| 181 | |
---|
| 182 | log_printf(TRACE,Load_store_unit,FUNCTION," * mask_end_check : %x",(-1& _param->_mask_address_lsb)); |
---|
| 183 | // The check is finish if all bit is set |
---|
| 184 | end_check = (_load_queue[index_load]._check_hit_byte == MASK_CHECK_BYTE_HIT); |
---|
| 185 | } |
---|
| 186 | } |
---|
| 187 | |
---|
| 188 | next = true; |
---|
| 189 | break; |
---|
| 190 | } |
---|
| 191 | case STORE_QUEUE_EMPTY : |
---|
| 192 | case STORE_QUEUE_NO_VALID_NO_SPECULATIVE : |
---|
| 193 | { |
---|
| 194 | log_printf(TRACE,Load_store_unit,FUNCTION," * store have an invalid entry"); |
---|
| 195 | break; |
---|
| 196 | } |
---|
| 197 | } |
---|
| 198 | } |
---|
| 199 | |
---|
| 200 | if (next) |
---|
| 201 | { |
---|
| 202 | log_printf(TRACE,Load_store_unit,FUNCTION," * next"); |
---|
| 203 | // if (_load_queue[index_load]._store_queue_ptr_write == 0) |
---|
| 204 | // _load_queue[index_load]._store_queue_ptr_write = _param->_size_store_queue-1; |
---|
| 205 | // else |
---|
| 206 | // _load_queue[index_load]._store_queue_ptr_write --; |
---|
| 207 | _load_queue[index_load]._store_queue_ptr_write = index_store; // because the index store have be decrease |
---|
| 208 | |
---|
| 209 | // FIXME : peut n'est pas obliger de faire cette comparaison. Au prochain cycle on le détectera que les pointeur sont égaux. Ceci évitera d'avoir deux comparateurs avec le registre "reg_STORE_QUEUE_PTR_READ" |
---|
| 210 | if (index_store == reg_STORE_QUEUE_PTR_READ) |
---|
| 211 | { |
---|
| 212 | end_check = true; |
---|
| 213 | change_state = true; |
---|
| 214 | } |
---|
| 215 | } |
---|
| 216 | |
---|
| 217 | if (change_state) |
---|
| 218 | { |
---|
| 219 | log_printf(TRACE,Load_store_unit,FUNCTION," * change_state"); |
---|
| 220 | |
---|
| 221 | switch (_load_queue[index_load]._state) |
---|
| 222 | { |
---|
| 223 | case LOAD_QUEUE_WAIT_CHECK : _load_queue[index_load]._state = LOAD_QUEUE_WAIT ; break; |
---|
| 224 | case LOAD_QUEUE_COMMIT_CHECK : |
---|
| 225 | { |
---|
| 226 | if (end_check) |
---|
| 227 | _load_queue[index_load]._state = LOAD_QUEUE_COMMIT; |
---|
| 228 | else |
---|
| 229 | _load_queue[index_load]._state = LOAD_QUEUE_CHECK; |
---|
| 230 | break; |
---|
| 231 | } |
---|
| 232 | case LOAD_QUEUE_CHECK : |
---|
| 233 | { |
---|
| 234 | if (end_check) |
---|
| 235 | _load_queue[index_load]._state = LOAD_QUEUE_COMMIT; |
---|
| 236 | // check find a bypass. A speculative load have been committed : report a speculation miss. |
---|
| 237 | if (_load_queue[index_load]._check_hit != 0) |
---|
| 238 | { |
---|
| 239 | _load_queue[index_load]._exception = EXCEPTION_MEMORY_MISS_SPECULATION; |
---|
| 240 | _load_queue[index_load]._write_rd = 1; // write the good result |
---|
| 241 | } |
---|
| 242 | |
---|
| 243 | break; |
---|
| 244 | } |
---|
| 245 | default : break; |
---|
| 246 | } |
---|
| 247 | log_printf(TRACE,Load_store_unit,FUNCTION," * new state : %d",_load_queue[index_load]._state); |
---|
| 248 | log_printf(TRACE,Load_store_unit,FUNCTION," * exception : %d",_load_queue[index_load]._exception); |
---|
| 249 | } |
---|
| 250 | } |
---|
| 251 | // else : don't use a port |
---|
| 252 | } |
---|
| 253 | |
---|
| 254 | //================================================================ |
---|
[59] | 255 | // Interface "MEMORY_IN" |
---|
| 256 | //================================================================ |
---|
| 257 | |
---|
| 258 | if ((PORT_READ(in_MEMORY_IN_VAL) == 1) and |
---|
| 259 | ( internal_MEMORY_IN_ACK == 1)) |
---|
| 260 | { |
---|
| 261 | // Test operation : |
---|
| 262 | //~~~~~~~~~~~~~~~~~ |
---|
| 263 | // store in store_queue |
---|
| 264 | // load in speculation_access_queue |
---|
| 265 | // others in speculation_access_queue |
---|
| 266 | |
---|
[62] | 267 | Toperation_t operation = PORT_READ(in_MEMORY_IN_OPERATION); |
---|
| 268 | Tgeneral_data_t address = (PORT_READ(in_MEMORY_IN_IMMEDIAT) + |
---|
| 269 | PORT_READ(in_MEMORY_IN_DATA_RA )); |
---|
| 270 | bool exception_alignement = (mask_memory_access(operation) & address) != 0; |
---|
[59] | 271 | |
---|
| 272 | if (is_operation_memory_store(operation) == true) |
---|
| 273 | { |
---|
| 274 | // ======================= |
---|
| 275 | // ===== STORE_QUEUE ===== |
---|
| 276 | // ======================= |
---|
| 277 | // There a two store request type : |
---|
| 278 | // - first is operation with address and data |
---|
| 279 | // - second is the information of re order buffer : the store become not speculative and can access at the data cache |
---|
| 280 | |
---|
| 281 | log_printf(TRACE,Load_store_unit,FUNCTION,"store_queue"); |
---|
| 282 | log_printf(TRACE,Load_store_unit,FUNCTION," * PUSH"); |
---|
| 283 | |
---|
| 284 | // Write pointer is define in rename stage : |
---|
| 285 | Tlsq_ptr_t index = PORT_READ(in_MEMORY_IN_STORE_QUEUE_PTR_WRITE); |
---|
| 286 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index); |
---|
| 287 | |
---|
| 288 | // Need read : state and exception. |
---|
| 289 | Tstore_queue_state_t old_state = _store_queue [index]._state; |
---|
| 290 | Tstore_queue_state_t new_state = old_state; |
---|
| 291 | bool update_info = false; |
---|
| 292 | |
---|
| 293 | Texception_t old_exception = _store_queue [index]._exception; |
---|
| 294 | Texception_t new_exception = old_exception; |
---|
| 295 | |
---|
| 296 | // Compute next state |
---|
| 297 | switch (old_state) |
---|
| 298 | { |
---|
| 299 | case STORE_QUEUE_EMPTY : |
---|
| 300 | { |
---|
| 301 | if (is_operation_memory_store_head(operation) == true) |
---|
| 302 | { |
---|
| 303 | new_state = STORE_QUEUE_NO_VALID_NO_SPECULATIVE; |
---|
| 304 | |
---|
| 305 | // test if is a speculation |
---|
| 306 | if (operation == OPERATION_MEMORY_STORE_HEAD_KO) |
---|
| 307 | new_exception = EXCEPTION_MEMORY_MISS_SPECULATION; |
---|
| 308 | else |
---|
| 309 | new_exception = EXCEPTION_MEMORY_NONE; |
---|
| 310 | } |
---|
| 311 | else |
---|
| 312 | { |
---|
| 313 | new_state = STORE_QUEUE_VALID_SPECULATIVE; |
---|
| 314 | |
---|
| 315 | // Test if have an exception |
---|
| 316 | if (exception_alignement == true) |
---|
| 317 | new_exception = EXCEPTION_MEMORY_ALIGNMENT; |
---|
| 318 | else |
---|
| 319 | new_exception = EXCEPTION_MEMORY_NONE; |
---|
| 320 | |
---|
| 321 | update_info = true; |
---|
| 322 | } |
---|
| 323 | break; |
---|
| 324 | } |
---|
| 325 | case STORE_QUEUE_NO_VALID_NO_SPECULATIVE : |
---|
| 326 | { |
---|
[71] | 327 | #ifdef DEBUG_TEST |
---|
| 328 | if (is_operation_memory_store_head(operation) == true) |
---|
| 329 | throw ErrorMorpheo(_("Transaction in memory_in's interface, actual state of store_queue is \"STORE_QUEUE_NO_VALID_NO_SPECULATIVE\", also a previous store_head have been receiveid. But this operation is a store_head.")); |
---|
| 330 | #endif |
---|
| 331 | // Test if have a new exception (priority : miss_speculation) |
---|
| 332 | if ((exception_alignement == true) and (old_exception == EXCEPTION_MEMORY_NONE)) |
---|
| 333 | new_exception = EXCEPTION_MEMORY_ALIGNMENT; |
---|
| 334 | |
---|
| 335 | if (new_exception != EXCEPTION_MEMORY_NONE) |
---|
| 336 | new_state = STORE_QUEUE_COMMIT; |
---|
| 337 | else |
---|
| 338 | new_state = STORE_QUEUE_VALID_NO_SPECULATIVE; |
---|
| 339 | |
---|
| 340 | update_info = true; |
---|
| 341 | break; |
---|
[59] | 342 | } |
---|
| 343 | case STORE_QUEUE_VALID_SPECULATIVE : |
---|
| 344 | { |
---|
[71] | 345 | #ifdef DEBUG_TEST |
---|
| 346 | if (is_operation_memory_store_head(operation) == false) |
---|
| 347 | throw ErrorMorpheo(_("Transaction in memory_in's interface, actual state of store_queue is \"STORE_QUEUE_VALID_SPECULATIVE\", also a previous access with register and address have been receiveid. But this operation is a not store_head.")); |
---|
| 348 | #endif |
---|
| 349 | if (operation == OPERATION_MEMORY_STORE_HEAD_KO) |
---|
| 350 | new_exception = EXCEPTION_MEMORY_MISS_SPECULATION; // great prioritary |
---|
| 351 | |
---|
| 352 | if (new_exception != EXCEPTION_MEMORY_NONE) |
---|
| 353 | new_state = STORE_QUEUE_COMMIT; |
---|
| 354 | else |
---|
| 355 | new_state = STORE_QUEUE_VALID_NO_SPECULATIVE; |
---|
| 356 | |
---|
| 357 | break; |
---|
[59] | 358 | } |
---|
| 359 | case STORE_QUEUE_VALID_NO_SPECULATIVE : |
---|
| 360 | case STORE_QUEUE_COMMIT : |
---|
| 361 | { |
---|
[71] | 362 | throw ErrorMorpheo("<Load_store_unit::function_speculative_load_commit_transition> Invalid state and operation"); |
---|
[59] | 363 | } |
---|
| 364 | } |
---|
| 365 | |
---|
| 366 | _store_queue [index]._state = new_state; |
---|
| 367 | _store_queue [index]._exception = new_exception; |
---|
| 368 | |
---|
| 369 | if (update_info == true) |
---|
| 370 | { |
---|
| 371 | log_printf(TRACE,Load_store_unit,FUNCTION," * Update information"); |
---|
| 372 | |
---|
[71] | 373 | _store_queue [index]._context_id = (not _param->_have_port_context_id )?0:PORT_READ(in_MEMORY_IN_CONTEXT_ID); |
---|
| 374 | _store_queue [index]._front_end_id = (not _param->_have_port_front_end_id )?0:PORT_READ(in_MEMORY_IN_FRONT_END_ID); |
---|
| 375 | _store_queue [index]._ooo_engine_id = (not _param->_have_port_ooo_engine_id)?0:PORT_READ(in_MEMORY_IN_OOO_ENGINE_ID); |
---|
| 376 | _store_queue [index]._packet_id = (not _param->_have_port_packet_id )?0:PORT_READ(in_MEMORY_IN_PACKET_ID ); |
---|
| 377 | _store_queue [index]._operation = operation; |
---|
[59] | 378 | _store_queue [index]._load_queue_ptr_write = PORT_READ(in_MEMORY_IN_LOAD_QUEUE_PTR_WRITE); |
---|
| 379 | _store_queue [index]._address = address; |
---|
[71] | 380 | |
---|
| 381 | // reordering data |
---|
| 382 | _store_queue [index]._wdata = duplicate<Tgeneral_data_t>(_param->_size_general_data,PORT_READ(in_MEMORY_IN_DATA_RB), memory_size(operation), 0); |
---|
[59] | 383 | // _store_queue [index]._num_reg_rd = PORT_READ(in_MEMORY_IN_NUM_REG_RD ); |
---|
| 384 | } |
---|
| 385 | } |
---|
| 386 | else |
---|
| 387 | { |
---|
[71] | 388 | // ==================================== |
---|
| 389 | // ===== SPECULATIVE_ACCESS_QUEUE ===== |
---|
| 390 | // ==================================== |
---|
[59] | 391 | |
---|
[71] | 392 | // In the speculative access queue, there are many types of request |
---|
| 393 | log_printf(TRACE,Load_store_unit,FUNCTION,"speculative_access_queue"); |
---|
| 394 | log_printf(TRACE,Load_store_unit,FUNCTION," * PUSH"); |
---|
[59] | 395 | |
---|
[71] | 396 | // Write in reservation station |
---|
| 397 | uint32_t index = _speculative_access_queue_control->push(); |
---|
| 398 | |
---|
| 399 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d", index); |
---|
| 400 | |
---|
| 401 | Texception_t exception; |
---|
| 402 | |
---|
| 403 | if (exception_alignement == true) |
---|
| 404 | exception = EXCEPTION_MEMORY_ALIGNMENT; |
---|
| 405 | else |
---|
| 406 | exception = EXCEPTION_MEMORY_NONE; |
---|
| 407 | |
---|
| 408 | // if exception, don't access at the cache |
---|
| 409 | // NOTE : type "other" (lock, invalidate, flush and sync) can't make an alignement exception (access is equivalent at a 8 bits) |
---|
| 410 | _speculative_access_queue [index]._state = (exception == EXCEPTION_MEMORY_NONE)?SPECULATIVE_ACCESS_QUEUE_WAIT_CACHE:SPECULATIVE_ACCESS_QUEUE_WAIT_LOAD_QUEUE; |
---|
| 411 | _speculative_access_queue [index]._context_id = (not _param->_have_port_context_id )?0:PORT_READ(in_MEMORY_IN_CONTEXT_ID); |
---|
| 412 | _speculative_access_queue [index]._front_end_id = (not _param->_have_port_front_end_id )?0:PORT_READ(in_MEMORY_IN_FRONT_END_ID); |
---|
| 413 | _speculative_access_queue [index]._ooo_engine_id = (not _param->_have_port_ooo_engine_id)?0:PORT_READ(in_MEMORY_IN_OOO_ENGINE_ID); |
---|
| 414 | _speculative_access_queue [index]._packet_id = (not _param->_have_port_packet_id )?0:PORT_READ(in_MEMORY_IN_PACKET_ID); |
---|
| 415 | |
---|
| 416 | _speculative_access_queue [index]._operation = operation; |
---|
| 417 | _speculative_access_queue [index]._load_queue_ptr_write = PORT_READ(in_MEMORY_IN_LOAD_QUEUE_PTR_WRITE); |
---|
| 418 | _speculative_access_queue [index]._store_queue_ptr_write= PORT_READ(in_MEMORY_IN_STORE_QUEUE_PTR_WRITE); |
---|
| 419 | _speculative_access_queue [index]._address = address; |
---|
| 420 | // NOTE : if the operation is a load, then there is a result that must be written to the register file |
---|
| 421 | _speculative_access_queue [index]._write_rd = is_operation_memory_load(operation); |
---|
| 422 | _speculative_access_queue [index]._num_reg_rd = PORT_READ(in_MEMORY_IN_NUM_REG_RD ); |
---|
| 423 | |
---|
| 424 | _speculative_access_queue [index]._exception = exception; |
---|
[59] | 425 | |
---|
[71] | 426 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index); |
---|
[59] | 427 | } |
---|
| 428 | } |
---|
| 429 | |
---|
| 430 | //================================================================ |
---|
| 431 | // Interface "MEMORY_OUT" |
---|
| 432 | //================================================================ |
---|
| 433 | |
---|
| 434 | if (( internal_MEMORY_OUT_VAL == 1) and |
---|
| 435 | (PORT_READ(in_MEMORY_OUT_ACK) == 1)) |
---|
| 436 | { |
---|
[71] | 437 | log_printf(TRACE,Load_store_unit,FUNCTION,"MEMORY_OUT transaction"); |
---|
| 438 | |
---|
[59] | 439 | switch (internal_MEMORY_OUT_SELECT_QUEUE) |
---|
| 440 | { |
---|
| 441 | case SELECT_STORE_QUEUE : |
---|
| 442 | { |
---|
| 443 | // ======================= |
---|
| 444 | // ===== STORE_QUEUE ===== |
---|
| 445 | // ======================= |
---|
| 446 | |
---|
[71] | 447 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_queue [%d]",reg_STORE_QUEUE_PTR_READ); |
---|
| 448 | |
---|
[59] | 449 | // Entry flush and increase the read pointer |
---|
[71] | 450 | _store_queue [reg_STORE_QUEUE_PTR_READ]._state = STORE_QUEUE_EMPTY; |
---|
[59] | 451 | |
---|
[71] | 452 | reg_STORE_QUEUE_PTR_READ = (reg_STORE_QUEUE_PTR_READ+1)%_param->_size_store_queue; |
---|
| 453 | |
---|
| 454 | break; |
---|
| 455 | } |
---|
| 456 | case SELECT_LOAD_QUEUE : |
---|
| 457 | { |
---|
| 458 | // ====================== |
---|
| 459 | // ===== LOAD_QUEUE ===== |
---|
| 460 | // ====================== |
---|
[59] | 461 | |
---|
[71] | 462 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_queue [%d]",internal_MEMORY_OUT_PTR); |
---|
| 463 | |
---|
| 464 | // Entry flush and increase the read pointer |
---|
| 465 | |
---|
| 466 | _load_queue [internal_MEMORY_OUT_PTR]._state = LOAD_QUEUE_EMPTY; |
---|
| 467 | |
---|
| 468 | // reg_LOAD_QUEUE_PTR_READ = (reg_LOAD_QUEUE_PTR_READ+1)%_param->_size_load_queue; |
---|
[59] | 469 | |
---|
| 470 | break; |
---|
| 471 | } |
---|
| 472 | case SELECT_LOAD_QUEUE_SPECULATIVE : |
---|
[71] | 473 | { |
---|
| 474 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_queue [%d] (speculative)",internal_MEMORY_OUT_PTR); |
---|
| 475 | |
---|
| 476 | _load_queue [internal_MEMORY_OUT_PTR]._state = LOAD_QUEUE_CHECK; |
---|
| 477 | // NOTE : a speculative load write in the register file. |
---|
| 478 | // if the speculation is a miss, write_rd is re set at 1. |
---|
| 479 | _load_queue [internal_MEMORY_OUT_PTR]._write_rd = 0; |
---|
| 480 | break; |
---|
| 481 | } |
---|
| 482 | |
---|
[59] | 483 | break; |
---|
| 484 | } |
---|
| 485 | } |
---|
[62] | 486 | |
---|
| 487 | //================================================================ |
---|
| 488 | // Interface "DCACHE_REQ" |
---|
| 489 | //================================================================ |
---|
[71] | 490 | bool load_queue_push = (_speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._state == SPECULATIVE_ACCESS_QUEUE_WAIT_LOAD_QUEUE); |
---|
| 491 | |
---|
[62] | 492 | if (( internal_DCACHE_REQ_VAL == 1) and |
---|
| 493 | (PORT_READ(in_DCACHE_REQ_ACK) == 1)) |
---|
| 494 | { |
---|
[71] | 495 | log_printf(TRACE,Load_store_unit,FUNCTION,"DCACHE_REQ"); |
---|
| 496 | |
---|
[62] | 497 | switch (internal_DCACHE_REQ_SELECT_QUEUE) |
---|
| 498 | { |
---|
| 499 | case SELECT_STORE_QUEUE : |
---|
| 500 | { |
---|
| 501 | // ======================= |
---|
| 502 | // ===== STORE_QUEUE ===== |
---|
| 503 | // ======================= |
---|
| 504 | |
---|
| 505 | // Entry flush and increase the read pointer |
---|
| 506 | |
---|
[71] | 507 | _store_queue [reg_STORE_QUEUE_PTR_READ]._state = STORE_QUEUE_COMMIT; |
---|
[62] | 508 | |
---|
| 509 | break; |
---|
| 510 | } |
---|
[71] | 511 | case SELECT_LOAD_QUEUE_SPECULATIVE : |
---|
| 512 | { |
---|
| 513 | // ========================================= |
---|
| 514 | // ===== SELECT_LOAD_QUEUE_SPECULATIVE ===== |
---|
| 515 | // ========================================= |
---|
| 516 | |
---|
| 517 | load_queue_push = true; |
---|
| 518 | break; |
---|
| 519 | } |
---|
[62] | 520 | case SELECT_LOAD_QUEUE : |
---|
[71] | 521 | { |
---|
| 522 | throw ErrorMorpheo(_("Invalid selection")); |
---|
| 523 | break; |
---|
| 524 | } |
---|
| 525 | |
---|
[62] | 526 | break; |
---|
| 527 | } |
---|
| 528 | } |
---|
| 529 | |
---|
[71] | 530 | if (load_queue_push) |
---|
| 531 | { |
---|
| 532 | Tlsq_ptr_t ptr_write = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._load_queue_ptr_write; |
---|
| 533 | Toperation_t operation = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._operation; |
---|
| 534 | Texception_t exception = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._exception; |
---|
| 535 | bool have_exception = (exception != EXCEPTION_MEMORY_NONE); |
---|
| 536 | |
---|
| 537 | |
---|
| 538 | if (have_exception) |
---|
| 539 | _load_queue [ptr_write]._state = LOAD_QUEUE_COMMIT; |
---|
| 540 | else |
---|
| 541 | { |
---|
| 542 | if (have_dcache_rsp(operation)) |
---|
| 543 | { |
---|
| 544 | // load and synchronisation |
---|
| 545 | if (must_check(operation)) |
---|
| 546 | { |
---|
| 547 | // load |
---|
| 548 | _load_queue [ptr_write]._state = LOAD_QUEUE_WAIT_CHECK; |
---|
| 549 | } |
---|
| 550 | else |
---|
| 551 | { |
---|
| 552 | // synchronisation |
---|
| 553 | _load_queue [ptr_write]._state = LOAD_QUEUE_WAIT; |
---|
| 554 | } |
---|
| 555 | } |
---|
| 556 | else |
---|
| 557 | { |
---|
| 558 | // lock, prefecth, flush and invalidate |
---|
| 559 | _load_queue [ptr_write]._state = LOAD_QUEUE_COMMIT; |
---|
| 560 | } |
---|
| 561 | } |
---|
| 562 | |
---|
| 563 | Tdcache_address_t address = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._address; |
---|
| 564 | Tdcache_address_t address_lsb = (address & _param->_mask_address_lsb); |
---|
| 565 | Tdcache_address_t check_hit_byte = gen_mask_not<Tdcache_address_t>(address_lsb+memory_access(operation)+1,address_lsb); |
---|
| 566 | _load_queue [ptr_write]._context_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._context_id ; |
---|
| 567 | _load_queue [ptr_write]._front_end_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._front_end_id ; |
---|
| 568 | _load_queue [ptr_write]._ooo_engine_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._ooo_engine_id ; |
---|
| 569 | _load_queue [ptr_write]._packet_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._packet_id ; |
---|
| 570 | _load_queue [ptr_write]._operation = operation; |
---|
| 571 | _load_queue [ptr_write]._store_queue_ptr_write = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._store_queue_ptr_write; |
---|
| 572 | _load_queue [ptr_write]._address = address; |
---|
| 573 | _load_queue [ptr_write]._check_hit_byte = check_hit_byte; |
---|
| 574 | _load_queue [ptr_write]._check_hit = 0; |
---|
| 575 | _load_queue [ptr_write]._shift = address<<3; |
---|
| 576 | _load_queue [ptr_write]._is_load_signed = is_operation_memory_load_signed(operation); |
---|
| 577 | _load_queue [ptr_write]._access_size = memory_size(operation); |
---|
| 578 | // NOTE : if have an exception, must write in register, because a depend instruction wait the load data. |
---|
| 579 | _load_queue [ptr_write]._write_rd = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._write_rd ; |
---|
| 580 | |
---|
| 581 | _load_queue [ptr_write]._num_reg_rd = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._num_reg_rd ; |
---|
| 582 | _load_queue [ptr_write]._exception = exception; |
---|
| 583 | _load_queue [ptr_write]._rdata = address; // to the exception |
---|
| 584 | |
---|
| 585 | log_printf(TRACE,Load_store_unit,FUNCTION," * speculative_access_queue"); |
---|
| 586 | log_printf(TRACE,Load_store_unit,FUNCTION," * POP[%d]",(*_speculative_access_queue_control)[0]); |
---|
| 587 | |
---|
| 588 | _speculative_access_queue [(*_speculative_access_queue_control)[0]]._state = SPECULATIVE_ACCESS_QUEUE_EMPTY; |
---|
| 589 | |
---|
| 590 | _speculative_access_queue_control->pop(); |
---|
| 591 | } |
---|
| 592 | |
---|
| 593 | //================================================================ |
---|
| 594 | // Interface "DCACHE_RSP" |
---|
| 595 | //================================================================ |
---|
| 596 | if ((PORT_READ(in_DCACHE_RSP_VAL)== 1) and |
---|
| 597 | ( internal_DCACHE_RSP_ACK == 1)) |
---|
| 598 | { |
---|
| 599 | log_printf(TRACE,Load_store_unit,FUNCTION,"DCACHE_RSP"); |
---|
| 600 | |
---|
| 601 | // don't use context_id : because there are one queue for all thread |
---|
| 602 | //Tcontext_t context_id = PORT_READ(in_DCACHE_RSP_CONTEXT_ID); |
---|
| 603 | Tpacket_t packet_id = PORT_READ(in_DCACHE_RSP_PACKET_ID ); |
---|
| 604 | Tdcache_data_t rdata = PORT_READ(in_DCACHE_RSP_RDATA ); |
---|
| 605 | Tdcache_error_t error = PORT_READ(in_DCACHE_RSP_ERROR ); |
---|
| 606 | |
---|
| 607 | log_printf(TRACE,Load_store_unit,FUNCTION," * original packet_id : %d", packet_id); |
---|
| 608 | |
---|
| 609 | if (DCACHE_RSP_IS_LOAD(packet_id) == 1) |
---|
| 610 | { |
---|
| 611 | packet_id >>= 1; |
---|
| 612 | |
---|
| 613 | log_printf(TRACE,Load_store_unit,FUNCTION," * packet is a LOAD : %d", packet_id); |
---|
| 614 | |
---|
| 615 | |
---|
| 616 | #ifdef DEBUG_TEST |
---|
| 617 | if (not have_dcache_rsp(_load_queue [packet_id]._operation)) |
---|
| 618 | throw ErrorMorpheo(_("Receive of respons, but the corresponding operation don't wait a respons.")); |
---|
| 619 | #endif |
---|
| 620 | |
---|
| 621 | |
---|
[72] | 622 | if (error != DCACHE_ERROR_NONE) |
---|
[71] | 623 | { |
---|
| 624 | log_printf(TRACE,Load_store_unit,FUNCTION," * have a bus error !!!"); |
---|
| 625 | |
---|
| 626 | _load_queue [packet_id]._exception = EXCEPTION_MEMORY_BUS_ERROR; |
---|
| 627 | _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT; |
---|
| 628 | } |
---|
| 629 | else |
---|
| 630 | { |
---|
| 631 | log_printf(TRACE,Load_store_unit,FUNCTION," * have no bus error."); |
---|
| 632 | log_printf(TRACE,Load_store_unit,FUNCTION," * previous state : %d.",_load_queue [packet_id]._state); |
---|
| 633 | |
---|
| 634 | // FIXME : convention : on a bus error, the cache returns the faulty address ! |
---|
| 635 | // But, the load's address is aligned ! |
---|
| 636 | _load_queue [packet_id]._rdata = rdata; |
---|
| 637 | |
---|
| 638 | switch (_load_queue [packet_id]._state) |
---|
| 639 | { |
---|
| 640 | case LOAD_QUEUE_WAIT_CHECK : _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT_CHECK; break; |
---|
| 641 | case LOAD_QUEUE_WAIT : _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT ; break; |
---|
| 642 | default : throw ErrorMorpheo(_("Illegal state (dcache_rsp).")); break; |
---|
| 643 | } |
---|
| 644 | } |
---|
| 645 | } |
---|
| 646 | else |
---|
| 647 | { |
---|
| 648 | log_printf(TRACE,Load_store_unit,FUNCTION," * packet is a STORE"); |
---|
| 649 | |
---|
| 650 | // TODO : stores do not generate a response, except when there is a bus error !!! |
---|
| 651 | throw ERRORMORPHEO(FUNCTION,_("dcache_rsp : no respons to a write. (TODO : manage bus error to the store operation.)")); |
---|
| 652 | } |
---|
| 653 | |
---|
| 654 | } |
---|
| 655 | |
---|
| 656 | // this register is to manage the priority of check -> Round robin |
---|
| 657 | reg_LOAD_QUEUE_CHECK_PRIORITY = (reg_LOAD_QUEUE_CHECK_PRIORITY+1)%_param->_size_load_queue; |
---|
| 658 | |
---|
| 659 | |
---|
[62] | 660 | #if DEBUG>=DEBUG_TRACE |
---|
| 661 | // ***** dump store queue |
---|
[75] | 662 | std::cout << "Dump STORE_QUEUE :" << std::endl |
---|
| 663 | << "ptr_read : " << toString(static_cast<uint32_t>(reg_STORE_QUEUE_PTR_READ)) << std::endl; |
---|
[62] | 664 | |
---|
| 665 | for (uint32_t i=0; i<_param->_size_store_queue; i++) |
---|
| 666 | { |
---|
[71] | 667 | uint32_t j = (reg_STORE_QUEUE_PTR_READ+i)%_param->_size_store_queue; |
---|
[75] | 668 | std::cout << "{" << j << "}" << std::endl |
---|
| 669 | << _store_queue[j] << std::endl; |
---|
[62] | 670 | } |
---|
[71] | 671 | |
---|
| 672 | // ***** dump speculative_access queue |
---|
[75] | 673 | std::cout << "Dump SPECULATIVE_ACCESS_QUEUE :" << std::endl; |
---|
[71] | 674 | |
---|
| 675 | for (uint32_t i=0; i<_param->_size_speculative_access_queue; i++) |
---|
| 676 | { |
---|
| 677 | uint32_t j = (*_speculative_access_queue_control)[i]; |
---|
[75] | 678 | std::cout << "{" << j << "}" << std::endl |
---|
| 679 | << _speculative_access_queue[j] << std::endl; |
---|
[71] | 680 | } |
---|
| 681 | |
---|
| 682 | // ***** dump load queue |
---|
[75] | 683 | std::cout << "Dump LOAD_QUEUE :" << std::endl |
---|
| 684 | << "ptr_read_check_priority : " << toString(static_cast<uint32_t>(reg_LOAD_QUEUE_CHECK_PRIORITY)) << std::endl; |
---|
[71] | 685 | |
---|
| 686 | for (uint32_t i=0; i<_param->_size_load_queue; i++) |
---|
| 687 | { |
---|
| 688 | uint32_t j = i; |
---|
[75] | 689 | std::cout << "{" << j << "}" << std::endl |
---|
| 690 | << _load_queue[j] << std::endl; |
---|
[71] | 691 | } |
---|
| 692 | |
---|
[62] | 693 | #endif |
---|
[71] | 694 | |
---|
| 695 | #ifdef STATISTICS |
---|
| 696 | for (uint32_t i=0; i<_param->_size_store_queue; i++) |
---|
| 697 | if (_store_queue[i]._state != STORE_QUEUE_EMPTY) |
---|
| 698 | (*_stat_use_store_queue) ++; |
---|
| 699 | for (uint32_t i=0; i<_param->_size_speculative_access_queue; i++) |
---|
| 700 | if (_speculative_access_queue[i]._state != SPECULATIVE_ACCESS_QUEUE_EMPTY) |
---|
| 701 | (*_stat_use_speculative_access_queue) ++; |
---|
| 702 | for (uint32_t i=0; i<_param->_size_load_queue; i++) |
---|
| 703 | if (_load_queue[i]._state != LOAD_QUEUE_EMPTY) |
---|
| 704 | (*_stat_use_load_queue) ++; |
---|
| 705 | #endif |
---|
[59] | 706 | } |
---|
| 707 | |
---|
| 708 | log_printf(FUNC,Load_store_unit,FUNCTION,"End"); |
---|
| 709 | }; |
---|
| 710 | |
---|
| 711 | }; // end namespace load_store_unit |
---|
| 712 | }; // end namespace execute_unit |
---|
| 713 | }; // end namespace multi_execute_unit |
---|
| 714 | }; // end namespace execute_loop |
---|
| 715 | }; // end namespace multi_execute_loop |
---|
| 716 | }; // end namespace core |
---|
| 717 | |
---|
| 718 | }; // end namespace behavioural |
---|
| 719 | }; // end namespace morpheo |
---|
| 720 | #endif |
---|