[59] | 1 | #ifdef SYSTEMC |
---|
| 2 | /* |
---|
| 3 | * $Id: Load_store_unit_function_speculative_load_commit_transition.cpp 112 2009-03-18 22:36:26Z rosiere $ |
---|
| 4 | * |
---|
| 5 | * [ Description ] |
---|
| 6 | * |
---|
| 7 | */ |
---|
| 8 | |
---|
| 9 | #include "Behavioural/Core/Multi_Execute_loop/Execute_loop/Multi_Execute_unit/Execute_unit/Load_store_unit/include/Load_store_unit.h" |
---|
| 10 | |
---|
| 11 | namespace morpheo { |
---|
| 12 | namespace behavioural { |
---|
| 13 | namespace core { |
---|
| 14 | namespace multi_execute_loop { |
---|
| 15 | namespace execute_loop { |
---|
| 16 | namespace multi_execute_unit { |
---|
| 17 | namespace execute_unit { |
---|
| 18 | namespace load_store_unit { |
---|
| 19 | |
---|
[106] | 20 | template <typename T> |
---|
| 21 | T swapBytes (T data, uint32_t size_data, uint32_t size_access) |
---|
| 22 | { |
---|
| 23 | uint64_t x = static_cast<uint64_t>(data); |
---|
[59] | 24 | |
---|
[106] | 25 | // switch (size_data) |
---|
| 26 | // { |
---|
| 27 | // case 2 : // 16 bits |
---|
| 28 | // { |
---|
| 29 | // switch (size_access) |
---|
| 30 | // { |
---|
| 31 | // case 2 : |
---|
| 32 | // { |
---|
| 33 | // x = ((((x>> 8)&0xff) << 0) | |
---|
| 34 | // (((x>> 0)&0xff) << 8) ); |
---|
| 35 | // break; |
---|
| 36 | // } |
---|
| 37 | // default : |
---|
| 38 | // { |
---|
| 39 | // break; |
---|
| 40 | // } |
---|
| 41 | // } |
---|
| 42 | // break; |
---|
| 43 | // } |
---|
| 44 | // case 4 : // 32 bits |
---|
| 45 | // { |
---|
| 46 | // switch (size_access) |
---|
| 47 | // { |
---|
| 48 | // case 2 : |
---|
| 49 | // { |
---|
| 50 | // x = ((((x>> 8)&0xff) << 0) | |
---|
| 51 | // (((x>> 0)&0xff) << 8) | |
---|
| 52 | // (((x>>24)&0xff) << 16) | |
---|
| 53 | // (((x>>16)&0xff) << 24) ); |
---|
| 54 | // break; |
---|
| 55 | // } |
---|
| 56 | // case 4 : |
---|
| 57 | // { |
---|
| 58 | // x = ((((x>>24)&0xff) << 0) | |
---|
| 59 | // (((x>>16)&0xff) << 8) | |
---|
| 60 | // (((x>> 8)&0xff) << 16) | |
---|
| 61 | // (((x>> 0)&0xff) << 24) ); |
---|
| 62 | // break; |
---|
| 63 | // } |
---|
| 64 | // default : |
---|
| 65 | // { |
---|
| 66 | // break; |
---|
| 67 | // } |
---|
| 68 | // } |
---|
| 69 | // break; |
---|
| 70 | // } |
---|
| 71 | // case 8 : // 64 bits |
---|
| 72 | // { |
---|
| 73 | // switch (size_access) |
---|
| 74 | // { |
---|
| 75 | // case 2 : |
---|
| 76 | // { |
---|
| 77 | // x = ((((x>> 8)&0xff) << 0) | |
---|
| 78 | // (((x>> 0)&0xff) << 8) | |
---|
| 79 | // (((x>>24)&0xff) << 16) | |
---|
| 80 | // (((x>>16)&0xff) << 24) | |
---|
| 81 | // (((x>>40)&0xff) << 32) | |
---|
| 82 | // (((x>>32)&0xff) << 40) | |
---|
| 83 | // (((x>>56)&0xff) << 48) | |
---|
| 84 | // (((x>>48)&0xff) << 56) ); |
---|
| 85 | // break; |
---|
| 86 | // } |
---|
| 87 | // case 4 : |
---|
| 88 | // { |
---|
| 89 | // x = ((((x>>24)&0xff) << 0) | |
---|
| 90 | // (((x>>16)&0xff) << 8) | |
---|
| 91 | // (((x>> 8)&0xff) << 16) | |
---|
| 92 | // (((x>> 0)&0xff) << 24) | |
---|
| 93 | // (((x>>56)&0xff) << 32) | |
---|
| 94 | // (((x>>48)&0xff) << 40) | |
---|
| 95 | // (((x>>40)&0xff) << 48) | |
---|
| 96 | // (((x>>32)&0xff) << 56) ); |
---|
| 97 | // break; |
---|
| 98 | // } |
---|
| 99 | // case 8 : |
---|
| 100 | // { |
---|
| 101 | // x = ((((x>>56)&0xff) << 0) | |
---|
| 102 | // (((x>>48)&0xff) << 8) | |
---|
| 103 | // (((x>>40)&0xff) << 16) | |
---|
| 104 | // (((x>>32)&0xff) << 24) | |
---|
| 105 | // (((x>>24)&0xff) << 32) | |
---|
| 106 | // (((x>>16)&0xff) << 40) | |
---|
| 107 | // (((x>> 8)&0xff) << 48) | |
---|
| 108 | // (((x>> 0)&0xff) << 56) ); |
---|
| 109 | // break; |
---|
| 110 | // } |
---|
| 111 | // default : |
---|
| 112 | // { |
---|
| 113 | // break; |
---|
| 114 | // } |
---|
| 115 | // } |
---|
| 116 | // break; |
---|
| 117 | // } |
---|
| 118 | // default : |
---|
| 119 | // { |
---|
| 120 | // break; |
---|
| 121 | // } |
---|
| 122 | // } |
---|
| 123 | |
---|
| 124 | |
---|
| 125 | uint64_t y=0; |
---|
| 126 | |
---|
| 127 | for (uint32_t i=0; i<size_data; i+=size_access) |
---|
| 128 | { |
---|
| 129 | uint32_t offset = i<<3; |
---|
| 130 | |
---|
| 131 | switch (size_access) |
---|
| 132 | { |
---|
| 133 | case 1 : |
---|
| 134 | { |
---|
| 135 | y = x; |
---|
| 136 | break; |
---|
| 137 | } |
---|
| 138 | case 2 : |
---|
| 139 | { |
---|
| 140 | y |= ((((x>>( 8+offset))&0xff) << ( 0+offset)) | |
---|
| 141 | (((x>>( 0+offset))&0xff) << ( 8+offset)) ); |
---|
| 142 | break; |
---|
| 143 | } |
---|
| 144 | case 4 : |
---|
| 145 | { |
---|
| 146 | y |= ((((x>>(24+offset))&0xff) << ( 0+offset)) | |
---|
| 147 | (((x>>(16+offset))&0xff) << ( 8+offset)) | |
---|
| 148 | (((x>>( 8+offset))&0xff) << (16+offset)) | |
---|
| 149 | (((x>>( 0+offset))&0xff) << (24+offset)) ); |
---|
| 150 | break; |
---|
| 151 | } |
---|
| 152 | case 8 : |
---|
| 153 | { |
---|
| 154 | y |= ((((x>>(56+offset))&0xff) << ( 0+offset)) | |
---|
| 155 | (((x>>(48+offset))&0xff) << ( 8+offset)) | |
---|
| 156 | (((x>>(40+offset))&0xff) << (16+offset)) | |
---|
| 157 | (((x>>(32+offset))&0xff) << (24+offset)) | |
---|
| 158 | (((x>>(24+offset))&0xff) << (32+offset)) | |
---|
| 159 | (((x>>(16+offset))&0xff) << (40+offset)) | |
---|
| 160 | (((x>>( 8+offset))&0xff) << (48+offset)) | |
---|
| 161 | (((x>>( 0+offset))&0xff) << (56+offset)) ); |
---|
| 162 | break; |
---|
| 163 | } |
---|
| 164 | default : |
---|
| 165 | { |
---|
| 166 | break; |
---|
| 167 | } |
---|
| 168 | } |
---|
| 169 | } |
---|
| 170 | |
---|
| 171 | return static_cast<T>(y); |
---|
| 172 | } |
---|
| 173 | |
---|
/// @brief  Reverse the bit order of `data` independently inside each
///         `size_access`-bit group of a `size_data`-bit value
///         (used to convert the per-byte hit mask between endiannesses).
///
/// @param data        bit mask to convert (only the low 8 bits are used)
/// @param size_data   total mask width in BITS (one bit per byte of data)
/// @param size_access group width in BITS; must be 1, 2, 4 or 8
/// @return data with bits mirrored within every size_access-bit group
template <typename T>
T swapBits (T data, uint32_t size_data, uint32_t size_access)
{
  uint8_t src = static_cast<uint8_t>(data);
  uint8_t dst = 0;

  // Walk the mask one access-sized group at a time.
  for (uint32_t pos=0; pos<size_data; pos+=size_access)
    {
      switch (size_access)
        {
        case 1 :
          {
            // One-bit groups : the mask is unchanged.
            dst = src;
            break;
          }
        case 2 :
        case 4 :
        case 8 :
          {
            // Mirror the group : bit k receives bit (size_access-1-k).
            for (uint32_t k=0; k<size_access; k++)
              dst |= ((src >> ((size_access-1-k) + pos)) & 0x1)
                  << (k + pos);
            break;
          }
        default :
          {
            // Unsupported group width : leave dst at 0.
            break;
          }
        }
    }

  return static_cast<T>(dst);
}
---|
| 227 | |
---|
[59] | 228 | #undef FUNCTION |
---|
| 229 | #define FUNCTION "Load_store_unit::function_speculative_load_commit_transition" |
---|
| 230 | void Load_store_unit::function_speculative_load_commit_transition (void) |
---|
| 231 | { |
---|
[97] | 232 | log_begin(Load_store_unit,FUNCTION); |
---|
| 233 | log_function(Load_store_unit,FUNCTION,_name.c_str()); |
---|
[59] | 234 | |
---|
| 235 | if (PORT_READ(in_NRESET) == 0) |
---|
| 236 | { |
---|
| 237 | // Reset : clear all queue |
---|
| 238 | _speculative_access_queue_control->clear(); |
---|
| 239 | |
---|
[71] | 240 | reg_STORE_QUEUE_PTR_READ = 0; |
---|
| 241 | reg_LOAD_QUEUE_CHECK_PRIORITY = 0; |
---|
| 242 | |
---|
[59] | 243 | for (uint32_t i=0; i< _param->_size_store_queue ; i++) |
---|
| 244 | _store_queue [i]._state = STORE_QUEUE_EMPTY; |
---|
| 245 | |
---|
| 246 | for (uint32_t i=0; i< _param->_size_load_queue ; i++) |
---|
| 247 | _load_queue [i]._state = LOAD_QUEUE_EMPTY; |
---|
| 248 | |
---|
| 249 | for (uint32_t i=0; i< _param->_size_speculative_access_queue; i++) |
---|
| 250 | _speculative_access_queue [i]._state = SPECULATIVE_ACCESS_QUEUE_EMPTY; |
---|
| 251 | } |
---|
| 252 | else |
---|
| 253 | { |
---|
| 254 | //================================================================ |
---|
[71] | 255 | // Interface "PORT_CHECK" |
---|
| 256 | //================================================================ |
---|
| 257 | |
---|
| 258 | // Plusieurs moyens de faire la verification de dépendance entre les loads et les stores. |
---|
| 259 | // 1) un load ne peut vérifier qu'un store par cycle. Dans ce cas port_check <= size_load_queue |
---|
| 260 | // 2) un load tente de vérifier le maximum de store par cycle. Dans ce cas ce n'est pas du pointeur d'écriture qu'il lui faut mais un vecteur de bit indiquant quel store à déjà été testé. De plus il faut un bit indiquant qu'il y a un match mais que ce n'est pas forcément le premier. |
---|
| 261 | |
---|
| 262 | // solution 1) |
---|
[97] | 263 | log_printf(TRACE,Load_store_unit,FUNCTION," * CHECK"); |
---|
[71] | 264 | for (uint32_t i=0, nb_check=0; (nb_check<_param->_nb_port_check) and (i<_param->_size_load_queue); i++) |
---|
| 265 | { |
---|
[104] | 266 | // Get an index from load queue |
---|
[71] | 267 | uint32_t index_load = (i + reg_LOAD_QUEUE_CHECK_PRIORITY)%_param->_size_load_queue; |
---|
[104] | 268 | |
---|
| 269 | // Test if this load must ckecked store queue |
---|
| 270 | if (((_load_queue[index_load]._state == LOAD_QUEUE_WAIT_CHECK) or |
---|
[71] | 271 | (_load_queue[index_load]._state == LOAD_QUEUE_COMMIT_CHECK) or |
---|
| 272 | (_load_queue[index_load]._state == LOAD_QUEUE_CHECK)) and |
---|
| 273 | is_operation_memory_load(_load_queue[index_load]._operation)) |
---|
| 274 | { |
---|
[97] | 275 | log_printf(TRACE,Load_store_unit,FUNCTION," * Find a load : %d",index_load); |
---|
[71] | 276 | |
---|
| 277 | nb_check++; // use one port |
---|
| 278 | |
---|
| 279 | // find a entry that it need a check |
---|
| 280 | Tlsq_ptr_t index_store = _load_queue[index_load]._store_queue_ptr_write; |
---|
[104] | 281 | // Init variable |
---|
[71] | 282 | bool end_check = false; |
---|
| 283 | bool change_state = false; |
---|
| 284 | bool next = false; |
---|
| 285 | |
---|
| 286 | // At the first store queue empty, stop check. |
---|
| 287 | // Explication : |
---|
| 288 | // * rename logic keep a empty case in the store queue (also size_store_queue > 1) |
---|
| 289 | // * when a store is out of store queue, also it was in head of re order buffer. Also, they are none previous load. |
---|
| 290 | |
---|
| 291 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store : %d",index_store); |
---|
[106] | 292 | log_printf(TRACE,Load_store_unit,FUNCTION," * ptr_read : %d",reg_STORE_QUEUE_PTR_READ); |
---|
| 293 | |
---|
[71] | 294 | if (index_store == reg_STORE_QUEUE_PTR_READ) |
---|
| 295 | { |
---|
| 296 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store == reg_STORE_QUEUE_PTR_READ"); |
---|
| 297 | end_check = true; |
---|
| 298 | change_state = true; |
---|
| 299 | } |
---|
| 300 | else |
---|
| 301 | { |
---|
| 302 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store != reg_STORE_QUEUE_PTR_READ"); |
---|
| 303 | |
---|
| 304 | index_store = (index_store-1)%(_param->_size_store_queue); // store_queue_ptr_write target the next slot to write, also the slot is not significatif when the load is renaming |
---|
| 305 | |
---|
| 306 | log_printf(TRACE,Load_store_unit,FUNCTION," * index_store : %d",index_store); |
---|
| 307 | |
---|
[104] | 308 | // switch on store_queue state |
---|
[71] | 309 | switch (_store_queue[index_store]._state) |
---|
| 310 | { |
---|
| 311 | case STORE_QUEUE_VALID_NO_SPECULATIVE : |
---|
| 312 | case STORE_QUEUE_COMMIT : |
---|
| 313 | case STORE_QUEUE_VALID_SPECULATIVE : |
---|
| 314 | { |
---|
| 315 | |
---|
| 316 | log_printf(TRACE,Load_store_unit,FUNCTION," * store have a valid entry"); |
---|
| 317 | |
---|
| 318 | // TODO : MMU - nous considérons que les adresses sont physique |
---|
| 319 | bool test_thread_id = true; |
---|
| 320 | |
---|
[104] | 321 | // Test thread id |
---|
[71] | 322 | if (_param->_have_port_context_id) |
---|
| 323 | test_thread_id &= (_load_queue[index_load]._context_id == _store_queue[index_store]._context_id); |
---|
| 324 | if (_param->_have_port_front_end_id) |
---|
| 325 | test_thread_id &= (_load_queue[index_load]._front_end_id == _store_queue[index_store]._front_end_id); |
---|
| 326 | if (_param->_have_port_ooo_engine_id) |
---|
| 327 | test_thread_id &= (_load_queue[index_load]._ooo_engine_id == _store_queue[index_store]._ooo_engine_id); |
---|
| 328 | |
---|
| 329 | if (test_thread_id) |
---|
| 330 | { |
---|
[104] | 331 | // the load and store are in the same thread. Now, we must test address. |
---|
| 332 | |
---|
[71] | 333 | log_printf(TRACE,Load_store_unit,FUNCTION," * load and store is the same thread."); |
---|
| 334 | Tdcache_address_t load_addr = _load_queue [index_load ]._address; |
---|
| 335 | Tdcache_address_t store_addr = _store_queue[index_store]._address; |
---|
| 336 | |
---|
| 337 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_addr : %.8x.",load_addr ); |
---|
| 338 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_addr : %.8x.",store_addr); |
---|
| 339 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_addr & mask_address_msb : %.8x.",load_addr & _param->_mask_address_msb); |
---|
| 340 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_addr & mask_address_msb : %.8x.",store_addr & _param->_mask_address_msb); |
---|
[104] | 341 | // Test if the both address target the same "word" |
---|
[71] | 342 | if ((load_addr & _param->_mask_address_msb) == |
---|
| 343 | (store_addr & _param->_mask_address_msb)) |
---|
| 344 | { |
---|
| 345 | log_printf(TRACE,Load_store_unit,FUNCTION," * address_msb is the same."); |
---|
| 346 | // all case - [] : store, () : load |
---|
| 347 | // (1) store_max >= load_max and store_min <= load_min ...[...(...)...]... Ok - inclusion in store |
---|
| 348 | // (2) store_min > load_max ...[...]...(...)... Ok - no conflit |
---|
| 349 | // (3) store_max < load_min ...(...)...[...]... Ok - no conflit |
---|
| 350 | // (4) store_max < load_max and store_min > load_min ...(...[...]...)... Ko - inclusion in load |
---|
| 351 | // (5) store_max >= load_max and store_min > load_min ...[...(...]...)... Ko - conflit |
---|
| 352 | // (6) store_max < load_max and store_min <= load_min ...(...[...)...]... Ko - conflit |
---|
| 353 | // but : |
---|
| 354 | // load in the cache is a word ! |
---|
| 355 | // the mask can be make when the load is commited. Also, the rdata content a full word. |
---|
| 356 | // the only case is (4) |
---|
| 357 | |
---|
[104] | 358 | // Read data |
---|
[106] | 359 | bool is_big_endian = true; |
---|
[104] | 360 | |
---|
[106] | 361 | Tgeneral_data_t load_data = _load_queue [index_load ]._rdata ; |
---|
| 362 | Tgeneral_data_t store_data = _store_queue[index_store]._wdata ; |
---|
| 363 | Tdcache_address_t check_hit_byte = _load_queue [index_load ]._check_hit_byte; |
---|
| 364 | Tcontrol_t check_hit = _load_queue [index_load ]._check_hit; |
---|
| 365 | uint32_t load_size_access = memory_size(_load_queue [index_load ]._operation)>>3; |
---|
| 366 | uint32_t store_size_access = memory_size(_store_queue[index_store]._operation)>>3; |
---|
| 367 | |
---|
| 368 | log_printf(TRACE,Load_store_unit,FUNCTION," * is_big_endian : %d",is_big_endian); |
---|
| 369 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_data : 0x%.8x",load_data); |
---|
| 370 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_data : 0x%.8x",store_data); |
---|
| 371 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : %x",check_hit_byte); |
---|
| 372 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit : %d",check_hit); |
---|
| 373 | |
---|
| 374 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_size_access : %d",load_size_access ); |
---|
| 375 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_size_access : %d",store_size_access); |
---|
| 376 | |
---|
| 377 | if (is_big_endian) |
---|
| 378 | { |
---|
| 379 | // swap in little endian |
---|
| 380 | load_data = swapBytes<Tgeneral_data_t >(load_data , _param->_size_general_data>>3,load_size_access); |
---|
| 381 | store_data = swapBytes<Tgeneral_data_t >(store_data , _param->_size_general_data>>3,store_size_access); |
---|
| 382 | check_hit_byte = swapBits <Tdcache_address_t>(check_hit_byte, _param->_size_general_data>>3,load_size_access); |
---|
| 383 | |
---|
| 384 | |
---|
| 385 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_data (swap 1) : 0x%.8x",load_data); |
---|
| 386 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_data (swap 1) : 0x%.8x",store_data); |
---|
| 387 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte (swap 1) : %x",check_hit_byte); |
---|
| 388 | } |
---|
| 389 | |
---|
[104] | 390 | uint32_t store_nb_byte = (1<<memory_access(_store_queue[index_store]._operation)); |
---|
| 391 | |
---|
| 392 | // Take interval to the store |
---|
[71] | 393 | uint32_t store_num_byte_min = (store_addr & _param->_mask_address_lsb); |
---|
[104] | 394 | uint32_t store_num_byte_max = store_num_byte_min+store_nb_byte; |
---|
| 395 | |
---|
[106] | 396 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_num_byte_min : %d",store_num_byte_min); |
---|
| 397 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_num_byte_max : %d",store_num_byte_max); |
---|
| 398 | |
---|
| 399 | // uint32_t load_nb_byte = (1<<memory_access(_load_queue[index_load]._operation)); |
---|
| 400 | |
---|
| 401 | // uint32_t load_num_byte_min = (load_addr & _param->_mask_address_lsb); |
---|
| 402 | // uint32_t load_num_byte_max = load_num_byte_min+load_nb_byte; |
---|
| 403 | |
---|
| 404 | // log_printf(TRACE,Load_store_unit,FUNCTION," * load_num_byte_min : %d",load_num_byte_min); |
---|
| 405 | // log_printf(TRACE,Load_store_unit,FUNCTION," * load_num_byte_max : %d",load_num_byte_max); |
---|
| 406 | |
---|
| 407 | // for (uint32_t num_load_byte=load_num_byte_min; num_load_byte<load_num_byte_max; num_load_byte ++) |
---|
| 408 | // { |
---|
| 409 | // // Make a mask |
---|
| 410 | // uint32_t num_store_byte = num_load_byte; |
---|
| 411 | |
---|
| 412 | |
---|
| 413 | |
---|
[71] | 414 | // The bypass is checked byte per byte |
---|
[104] | 415 | // Is same endianness : because to change endianness, we must write in special register. Also the pipeline is flushed. |
---|
| 416 | for (uint32_t num_store_byte=store_num_byte_min; num_store_byte<store_num_byte_max; num_store_byte ++) |
---|
[71] | 417 | { |
---|
[104] | 418 | // Make a mask |
---|
[106] | 419 | uint32_t num_load_byte = num_store_byte; |
---|
[104] | 420 | |
---|
[106] | 421 | // if (is_big_endian) |
---|
| 422 | // { |
---|
| 423 | // // sd 0 : 0 1 2 3 4 5 6 7 |
---|
| 424 | // // ld 0 : 0 1 2 3 4 5 6 7 >> 0 |
---|
| 425 | // // lw 0 : 0 1 2 3 >> 0 -4 |
---|
| 426 | // // lw 4 : 4 5 6 7 >> 32 +4 |
---|
| 427 | // // lh 0 : 0 1 >> 0 -6 |
---|
| 428 | // // lh 2 : 2 3 >> 16 -2 |
---|
| 429 | // // lh 4 : 4 5 >> 32 +2 |
---|
| 430 | // // lh 6 : 6 7 >> 48 +6 |
---|
| 431 | // // lb 0 : 0 >> 0 -7 |
---|
| 432 | // // lb 1 : 1 >> 8 -5 |
---|
| 433 | // // lb 2 : 2 >> 16 -3 |
---|
| 434 | // // lb 3 : 3 >> 24 -1 |
---|
| 435 | // // lb 4 : 4 >> 32 +1 |
---|
| 436 | // // lb 5 : 5 >> 40 +3 |
---|
| 437 | // // lb 6 : 6 >> 48 +5 |
---|
| 438 | // // lb 7 : 7 >> 56 +7 |
---|
[104] | 439 | |
---|
[106] | 440 | // // diff : (store_nb_byte + load_nb_byte) - 2*nb_load_byte*((num_store_byte+1) |
---|
[104] | 441 | |
---|
[106] | 442 | // // store duplicate = all store access can be see as full size_data store |
---|
| 443 | // // uint32_t load_nb_byte = (1<<memory_access(_load_queue [index_load ]._operation)); |
---|
[104] | 444 | |
---|
[106] | 445 | // // int32_t diff = ((_param->_size_general_data>>3)+load_nb_byte-2*load_nb_byte*((num_store_byte/load_nb_byte)+1)); |
---|
| 446 | |
---|
| 447 | // // num_load_byte =num_store_byte+diff; |
---|
| 448 | |
---|
| 449 | // // log_printf(TRACE,Load_store_unit,FUNCTION," * load_nb_byte : %d",load_nb_byte); |
---|
| 450 | // // log_printf(TRACE,Load_store_unit,FUNCTION," * diff : %d",diff); |
---|
| 451 | |
---|
| 452 | |
---|
| 453 | // num_load_byte = num_store_byte; |
---|
| 454 | // } |
---|
| 455 | // else |
---|
| 456 | // { |
---|
| 457 | // // sd 0 : 0 1 2 3 4 5 6 7 |
---|
| 458 | // // ld 0 : 0 1 2 3 4 5 6 7 >> 0 |
---|
| 459 | // // lw 0 : 4 5 6 7 >> 0 |
---|
| 460 | // // lw 4 : 0 1 2 3 >> 32 |
---|
| 461 | // // lh 0 : 6 7 >> 0 |
---|
| 462 | // // lh 2 : 4 5 >> 16 |
---|
| 463 | // // lh 4 : 2 3 >> 32 |
---|
| 464 | // // lh 6 : 0 1 >> 48 |
---|
| 465 | // // lb 0 : 7 >> 0 |
---|
| 466 | // // lb 1 : 6 >> 8 |
---|
| 467 | // // lb 2 : 5 >> 16 |
---|
| 468 | // // lb 3 : 4 >> 24 |
---|
| 469 | // // lb 4 : 3 >> 32 |
---|
| 470 | // // lb 5 : 2 >> 40 |
---|
| 471 | // // lb 6 : 1 >> 48 |
---|
| 472 | // // lb 7 : 0 >> 56 |
---|
[104] | 473 | |
---|
[106] | 474 | // num_load_byte = num_store_byte; |
---|
| 475 | // } |
---|
[104] | 476 | |
---|
| 477 | uint32_t mask = 1<<num_load_byte; |
---|
| 478 | |
---|
| 479 | log_printf(TRACE,Load_store_unit,FUNCTION," * num_store_byte : %d",num_store_byte); |
---|
| 480 | log_printf(TRACE,Load_store_unit,FUNCTION," * num_load_byte : %d",num_load_byte); |
---|
| 481 | log_printf(TRACE,Load_store_unit,FUNCTION," * mask : %d",mask); |
---|
| 482 | |
---|
| 483 | // Accept the bypass if : |
---|
| 484 | // * they have not a previous bypass with an another store |
---|
| 485 | // * it's a valid request of load |
---|
[106] | 486 | if ((check_hit_byte&mask)==0) |
---|
[71] | 487 | { |
---|
[104] | 488 | // Note : Store is duplicate = all store access can be see as full size_data store |
---|
| 489 | |
---|
| 490 | uint32_t num_store_bit_min = num_store_byte<<3; //*8 |
---|
[106] | 491 | // uint32_t num_store_bit_max = num_store_bit_min+8-1; |
---|
[104] | 492 | uint32_t num_load_bit_min = num_load_byte <<3; //*8 |
---|
| 493 | uint32_t num_load_bit_max = num_load_bit_min+8-1; |
---|
| 494 | |
---|
[71] | 495 | log_printf(TRACE,Load_store_unit,FUNCTION," * bypass !!!"); |
---|
[104] | 496 | // log_printf(TRACE,Load_store_unit,FUNCTION," * interval store : [%d:%d]",num_store_bit_max,num_store_bit_min); |
---|
| 497 | log_printf(TRACE,Load_store_unit,FUNCTION," * interval store : [..:%d]",num_store_bit_min); |
---|
| 498 | log_printf(TRACE,Load_store_unit,FUNCTION," * interval load : [%d:%d]",num_load_bit_max,num_load_bit_min); |
---|
| 499 | log_printf(TRACE,Load_store_unit,FUNCTION," * rdata_old : 0x%.8x", load_data); |
---|
| 500 | |
---|
| 501 | load_data = ((((store_data>>num_store_bit_min) & 0xff) << num_load_bit_min) | |
---|
| 502 | mask_not<Tdcache_data_t>(load_data,num_load_bit_max,num_load_bit_min)); |
---|
| 503 | |
---|
[106] | 504 | check_hit_byte |= mask; |
---|
| 505 | check_hit = 1; |
---|
[71] | 506 | change_state = true; |
---|
| 507 | |
---|
[104] | 508 | log_printf(TRACE,Load_store_unit,FUNCTION," * rdata_new : 0x%.8x", load_data); |
---|
[71] | 509 | } |
---|
| 510 | } |
---|
| 511 | |
---|
[106] | 512 | if (is_big_endian) |
---|
| 513 | { |
---|
| 514 | // swap in little endian |
---|
| 515 | load_data = swapBytes<Tgeneral_data_t >(load_data , _param->_size_general_data>>3,load_size_access); |
---|
| 516 | check_hit_byte = swapBits <Tdcache_address_t>(check_hit_byte, _param->_size_general_data>>3,load_size_access); |
---|
| 517 | |
---|
| 518 | |
---|
| 519 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_data (swap 2) : 0x%.8x",load_data); |
---|
| 520 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte (swap 2) : %x",check_hit_byte); |
---|
| 521 | } |
---|
| 522 | |
---|
| 523 | _load_queue[index_load]._rdata = load_data; |
---|
| 524 | _load_queue[index_load]._check_hit_byte = check_hit_byte; |
---|
| 525 | _load_queue[index_load]._check_hit = check_hit; |
---|
| 526 | |
---|
[104] | 527 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_data (after) : 0x%.8x",load_data); |
---|
[71] | 528 | |
---|
[106] | 529 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit : %x",check_hit); |
---|
| 530 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : %x",check_hit_byte); |
---|
[71] | 531 | |
---|
| 532 | log_printf(TRACE,Load_store_unit,FUNCTION," * mask_end_check : %x",(-1& _param->_mask_address_lsb)); |
---|
[104] | 533 | log_printf(TRACE,Load_store_unit,FUNCTION," * mask_check_hit_byte: %x",_param->_mask_check_hit_byte); |
---|
[71] | 534 | // The check is finish if all bit is set |
---|
[104] | 535 | end_check = (_load_queue[index_load]._check_hit_byte == _param->_mask_check_hit_byte); |
---|
[71] | 536 | } |
---|
| 537 | } |
---|
| 538 | |
---|
| 539 | next = true; |
---|
| 540 | break; |
---|
| 541 | } |
---|
| 542 | case STORE_QUEUE_EMPTY : |
---|
| 543 | case STORE_QUEUE_NO_VALID_NO_SPECULATIVE : |
---|
| 544 | { |
---|
| 545 | log_printf(TRACE,Load_store_unit,FUNCTION," * store have an invalid entry"); |
---|
| 546 | break; |
---|
| 547 | } |
---|
| 548 | } |
---|
| 549 | } |
---|
| 550 | |
---|
| 551 | if (next) |
---|
| 552 | { |
---|
| 553 | log_printf(TRACE,Load_store_unit,FUNCTION," * next"); |
---|
[106] | 554 | log_printf(TRACE,Load_store_unit,FUNCTION," * new store_queue_ptr_write : %d",index_store); |
---|
[71] | 555 | // if (_load_queue[index_load]._store_queue_ptr_write == 0) |
---|
| 556 | // _load_queue[index_load]._store_queue_ptr_write = _param->_size_store_queue-1; |
---|
| 557 | // else |
---|
| 558 | // _load_queue[index_load]._store_queue_ptr_write --; |
---|
| 559 | _load_queue[index_load]._store_queue_ptr_write = index_store; // because the index store have be decrease |
---|
| 560 | |
---|
| 561 | // FIXME : peut n'est pas obliger de faire cette comparaison. Au prochain cycle on le détectera que les pointeur sont égaux. Ceci évitera d'avoir deux comparateurs avec le registre "reg_STORE_QUEUE_PTR_READ" |
---|
| 562 | if (index_store == reg_STORE_QUEUE_PTR_READ) |
---|
| 563 | { |
---|
| 564 | end_check = true; |
---|
| 565 | change_state = true; |
---|
| 566 | } |
---|
| 567 | } |
---|
| 568 | |
---|
| 569 | if (change_state) |
---|
| 570 | { |
---|
| 571 | log_printf(TRACE,Load_store_unit,FUNCTION," * change_state"); |
---|
[106] | 572 | log_printf(TRACE,Load_store_unit,FUNCTION," * end_check : %d",end_check); |
---|
[71] | 573 | |
---|
[106] | 574 | log_printf(TRACE,Load_store_unit,FUNCTION," * state old : %s",toString(_load_queue[index_load]._state).c_str()); |
---|
| 575 | |
---|
[71] | 576 | switch (_load_queue[index_load]._state) |
---|
| 577 | { |
---|
[106] | 578 | case LOAD_QUEUE_WAIT_CHECK : |
---|
| 579 | { |
---|
| 580 | if (end_check) |
---|
| 581 | _load_queue[index_load]._state = LOAD_QUEUE_WAIT ; |
---|
| 582 | break; |
---|
| 583 | } |
---|
[71] | 584 | case LOAD_QUEUE_COMMIT_CHECK : |
---|
| 585 | { |
---|
| 586 | if (end_check) |
---|
| 587 | _load_queue[index_load]._state = LOAD_QUEUE_COMMIT; |
---|
| 588 | else |
---|
[106] | 589 | _load_queue[index_load]._state = LOAD_QUEUE_CHECK; // No commit : check hit and no end |
---|
[71] | 590 | break; |
---|
| 591 | } |
---|
| 592 | case LOAD_QUEUE_CHECK : |
---|
| 593 | { |
---|
| 594 | if (end_check) |
---|
| 595 | _load_queue[index_load]._state = LOAD_QUEUE_COMMIT; |
---|
[106] | 596 | |
---|
[71] | 597 | // check find a bypass. A speculative load have been committed : report a speculation miss. |
---|
[112] | 598 | if ((_load_queue[index_load]._check_hit != 0) and |
---|
| 599 | (_load_queue[index_load]._write_rd == 0) // is commit |
---|
[106] | 600 | ) |
---|
[71] | 601 | { |
---|
| 602 | _load_queue[index_load]._exception = EXCEPTION_MEMORY_MISS_SPECULATION; |
---|
| 603 | _load_queue[index_load]._write_rd = 1; // write the good result |
---|
[110] | 604 | |
---|
| 605 | #ifdef STATISTICS |
---|
| 606 | if (usage_is_set(_usage,USE_STATISTICS)) |
---|
| 607 | (*_stat_nb_inst_load_commit_miss) ++; |
---|
| 608 | #endif |
---|
[71] | 609 | } |
---|
| 610 | |
---|
| 611 | break; |
---|
| 612 | } |
---|
| 613 | default : break; |
---|
| 614 | } |
---|
[106] | 615 | log_printf(TRACE,Load_store_unit,FUNCTION," * state new : %s",toString(_load_queue[index_load]._state).c_str()); |
---|
[71] | 616 | log_printf(TRACE,Load_store_unit,FUNCTION," * exception : %d",_load_queue[index_load]._exception); |
---|
| 617 | } |
---|
| 618 | } |
---|
| 619 | // else : don't use a port |
---|
| 620 | } |
---|
| 621 | |
---|
| 622 | //================================================================ |
---|
[59] | 623 | // Interface "MEMORY_IN" |
---|
| 624 | //================================================================ |
---|
[88] | 625 | |
---|
| 626 | if ((PORT_READ(in_MEMORY_IN_VAL [internal_MEMORY_IN_PORT]) == 1) and |
---|
[59] | 627 | ( internal_MEMORY_IN_ACK == 1)) |
---|
| 628 | { |
---|
[101] | 629 | log_printf(TRACE,Load_store_unit,FUNCTION," * MEMORY_IN [%d]",internal_MEMORY_IN_PORT); |
---|
| 630 | |
---|
[59] | 631 | // Test operation : |
---|
| 632 | //~~~~~~~~~~~~~~~~~ |
---|
| 633 | // store in store_queue |
---|
| 634 | // load in speculation_access_queue |
---|
| 635 | // others in speculation_access_queue |
---|
| 636 | |
---|
[78] | 637 | #ifdef DEBUG_TEST |
---|
[88] | 638 | if (PORT_READ(in_MEMORY_IN_TYPE [internal_MEMORY_IN_PORT]) != TYPE_MEMORY) |
---|
[78] | 639 | throw ERRORMORPHEO(FUNCTION,"The type is different at 'TYPE_MEMORY'"); |
---|
| 640 | #endif |
---|
[88] | 641 | Toperation_t operation = PORT_READ(in_MEMORY_IN_OPERATION[internal_MEMORY_IN_PORT]); |
---|
| 642 | Tgeneral_data_t address = (PORT_READ(in_MEMORY_IN_IMMEDIAT[internal_MEMORY_IN_PORT]) + |
---|
| 643 | PORT_READ(in_MEMORY_IN_DATA_RA [internal_MEMORY_IN_PORT])); |
---|
[62] | 644 | bool exception_alignement = (mask_memory_access(operation) & address) != 0; |
---|
[59] | 645 | |
---|
| 646 | if (is_operation_memory_store(operation) == true) |
---|
| 647 | { |
---|
| 648 | // ======================= |
---|
| 649 | // ===== STORE_QUEUE ===== |
---|
| 650 | // ======================= |
---|
| 651 | // There a two store request type : |
---|
| 652 | // - first is operation with address and data |
---|
| 653 | // - second is the information of re order buffer : the store become not speculative and can access at the data cache |
---|
| 654 | |
---|
[97] | 655 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_queue"); |
---|
| 656 | log_printf(TRACE,Load_store_unit,FUNCTION," * PUSH"); |
---|
[59] | 657 | |
---|
| 658 | // Write pointer is define in rename stage : |
---|
[88] | 659 | Tlsq_ptr_t index = PORT_READ(in_MEMORY_IN_STORE_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]); |
---|
[97] | 660 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index); |
---|
[59] | 661 | |
---|
| 662 | // Need read : state and exception. |
---|
| 663 | Tstore_queue_state_t old_state = _store_queue [index]._state; |
---|
| 664 | Tstore_queue_state_t new_state = old_state; |
---|
| 665 | bool update_info = false; |
---|
| 666 | |
---|
| 667 | Texception_t old_exception = _store_queue [index]._exception; |
---|
| 668 | Texception_t new_exception = old_exception; |
---|
| 669 | |
---|
| 670 | // Compute next state |
---|
| 671 | switch (old_state) |
---|
| 672 | { |
---|
| 673 | case STORE_QUEUE_EMPTY : |
---|
| 674 | { |
---|
| 675 | if (is_operation_memory_store_head(operation) == true) |
---|
| 676 | { |
---|
| 677 | new_state = STORE_QUEUE_NO_VALID_NO_SPECULATIVE; |
---|
| 678 | |
---|
| 679 | // test if is a speculation |
---|
| 680 | if (operation == OPERATION_MEMORY_STORE_HEAD_KO) |
---|
| 681 | new_exception = EXCEPTION_MEMORY_MISS_SPECULATION; |
---|
| 682 | else |
---|
| 683 | new_exception = EXCEPTION_MEMORY_NONE; |
---|
| 684 | } |
---|
| 685 | else |
---|
| 686 | { |
---|
| 687 | new_state = STORE_QUEUE_VALID_SPECULATIVE; |
---|
| 688 | |
---|
| 689 | // Test if have an exception |
---|
| 690 | if (exception_alignement == true) |
---|
| 691 | new_exception = EXCEPTION_MEMORY_ALIGNMENT; |
---|
| 692 | else |
---|
| 693 | new_exception = EXCEPTION_MEMORY_NONE; |
---|
| 694 | |
---|
| 695 | update_info = true; |
---|
| 696 | } |
---|
| 697 | break; |
---|
| 698 | } |
---|
| 699 | case STORE_QUEUE_NO_VALID_NO_SPECULATIVE : |
---|
| 700 | { |
---|
[71] | 701 | #ifdef DEBUG_TEST |
---|
| 702 | if (is_operation_memory_store_head(operation) == true) |
---|
[110] | 703 | throw ERRORMORPHEO(FUNCTION,_("Transaction in memory_in's interface, actual state of store_queue is \"STORE_QUEUE_NO_VALID_NO_SPECULATIVE\", also a previous store_head have been receiveid. But this operation is a store_head.")); |
---|
[71] | 704 | #endif |
---|
| 705 | // Test if have a new exception (priority : miss_speculation) |
---|
| 706 | if ((exception_alignement == true) and (old_exception == EXCEPTION_MEMORY_NONE)) |
---|
| 707 | new_exception = EXCEPTION_MEMORY_ALIGNMENT; |
---|
| 708 | |
---|
| 709 | if (new_exception != EXCEPTION_MEMORY_NONE) |
---|
| 710 | new_state = STORE_QUEUE_COMMIT; |
---|
| 711 | else |
---|
| 712 | new_state = STORE_QUEUE_VALID_NO_SPECULATIVE; |
---|
| 713 | |
---|
| 714 | update_info = true; |
---|
| 715 | break; |
---|
[59] | 716 | } |
---|
| 717 | case STORE_QUEUE_VALID_SPECULATIVE : |
---|
| 718 | { |
---|
[71] | 719 | #ifdef DEBUG_TEST |
---|
| 720 | if (is_operation_memory_store_head(operation) == false) |
---|
[110] | 721 | throw ERRORMORPHEO(FUNCTION,_("Transaction in memory_in's interface, actual state of store_queue is \"STORE_QUEUE_VALID_SPECULATIVE\", also a previous access with register and address have been receiveid. But this operation is a not store_head.")); |
---|
[71] | 722 | #endif |
---|
| 723 | if (operation == OPERATION_MEMORY_STORE_HEAD_KO) |
---|
| 724 | new_exception = EXCEPTION_MEMORY_MISS_SPECULATION; // great prioritary |
---|
| 725 | |
---|
| 726 | if (new_exception != EXCEPTION_MEMORY_NONE) |
---|
| 727 | new_state = STORE_QUEUE_COMMIT; |
---|
| 728 | else |
---|
| 729 | new_state = STORE_QUEUE_VALID_NO_SPECULATIVE; |
---|
| 730 | |
---|
| 731 | break; |
---|
[59] | 732 | } |
---|
| 733 | case STORE_QUEUE_VALID_NO_SPECULATIVE : |
---|
| 734 | case STORE_QUEUE_COMMIT : |
---|
| 735 | { |
---|
[110] | 736 | throw ERRORMORPHEO(FUNCTION,"<Load_store_unit::function_speculative_load_commit_transition> Invalid state and operation"); |
---|
[59] | 737 | } |
---|
| 738 | } |
---|
| 739 | |
---|
| 740 | _store_queue [index]._state = new_state; |
---|
| 741 | _store_queue [index]._exception = new_exception; |
---|
| 742 | |
---|
| 743 | if (update_info == true) |
---|
| 744 | { |
---|
[97] | 745 | log_printf(TRACE,Load_store_unit,FUNCTION," * Update information"); |
---|
[59] | 746 | |
---|
[88] | 747 | _store_queue [index]._context_id = (not _param->_have_port_context_id )?0:PORT_READ(in_MEMORY_IN_CONTEXT_ID [internal_MEMORY_IN_PORT]); |
---|
| 748 | _store_queue [index]._front_end_id = (not _param->_have_port_front_end_id )?0:PORT_READ(in_MEMORY_IN_FRONT_END_ID [internal_MEMORY_IN_PORT]); |
---|
| 749 | _store_queue [index]._ooo_engine_id = (not _param->_have_port_ooo_engine_id)?0:PORT_READ(in_MEMORY_IN_OOO_ENGINE_ID[internal_MEMORY_IN_PORT]); |
---|
| 750 | _store_queue [index]._packet_id = (not _param->_have_port_rob_ptr )?0:PORT_READ(in_MEMORY_IN_PACKET_ID [internal_MEMORY_IN_PORT]); |
---|
[71] | 751 | _store_queue [index]._operation = operation; |
---|
[88] | 752 | _store_queue [index]._load_queue_ptr_write = (not _param->_have_port_load_queue_ptr)?0:PORT_READ(in_MEMORY_IN_LOAD_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]); |
---|
[59] | 753 | _store_queue [index]._address = address; |
---|
[71] | 754 | |
---|
| 755 | // reordering data |
---|
[104] | 756 | _store_queue [index]._wdata = duplicate<Tgeneral_data_t>(_param->_size_general_data,PORT_READ(in_MEMORY_IN_DATA_RB[internal_MEMORY_IN_PORT]), memory_size(operation), 0); |
---|
[88] | 757 | // _store_queue [index]._num_reg_rd = PORT_READ(in_MEMORY_IN_NUM_REG_RD [internal_MEMORY_IN_PORT]); |
---|
[59] | 758 | } |
---|
| 759 | } |
---|
| 760 | else |
---|
| 761 | { |
---|
[71] | 762 | // ==================================== |
---|
| 763 | // ===== SPECULATIVE_ACCESS_QUEUE ===== |
---|
| 764 | // ==================================== |
---|
[59] | 765 | |
---|
[71] | 766 | // In speculative access queue, they are many type's request |
---|
[97] | 767 | log_printf(TRACE,Load_store_unit,FUNCTION," * speculative_access_queue"); |
---|
| 768 | log_printf(TRACE,Load_store_unit,FUNCTION," * PUSH"); |
---|
[59] | 769 | |
---|
[71] | 770 | // Write in reservation station |
---|
| 771 | uint32_t index = _speculative_access_queue_control->push(); |
---|
| 772 | |
---|
[97] | 773 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d", index); |
---|
[71] | 774 | |
---|
| 775 | Texception_t exception; |
---|
| 776 | |
---|
| 777 | if (exception_alignement == true) |
---|
| 778 | exception = EXCEPTION_MEMORY_ALIGNMENT; |
---|
| 779 | else |
---|
| 780 | exception = EXCEPTION_MEMORY_NONE; |
---|
| 781 | |
---|
| 782 | // if exception, don't access at the cache |
---|
| 783 | // NOTE : type "other" (lock, invalidate, flush and sync) can't make an alignement exception (access is equivalent at a 8 bits) |
---|
| 784 | _speculative_access_queue [index]._state = (exception == EXCEPTION_MEMORY_NONE)?SPECULATIVE_ACCESS_QUEUE_WAIT_CACHE:SPECULATIVE_ACCESS_QUEUE_WAIT_LOAD_QUEUE; |
---|
[88] | 785 | _speculative_access_queue [index]._context_id = (not _param->_have_port_context_id )?0:PORT_READ(in_MEMORY_IN_CONTEXT_ID [internal_MEMORY_IN_PORT]); |
---|
| 786 | _speculative_access_queue [index]._front_end_id = (not _param->_have_port_front_end_id )?0:PORT_READ(in_MEMORY_IN_FRONT_END_ID [internal_MEMORY_IN_PORT]); |
---|
| 787 | _speculative_access_queue [index]._ooo_engine_id = (not _param->_have_port_ooo_engine_id)?0:PORT_READ(in_MEMORY_IN_OOO_ENGINE_ID[internal_MEMORY_IN_PORT]); |
---|
| 788 | _speculative_access_queue [index]._packet_id = (not _param->_have_port_rob_ptr )?0:PORT_READ(in_MEMORY_IN_PACKET_ID [internal_MEMORY_IN_PORT]); |
---|
[71] | 789 | |
---|
| 790 | _speculative_access_queue [index]._operation = operation; |
---|
[88] | 791 | _speculative_access_queue [index]._load_queue_ptr_write = (not _param->_have_port_load_queue_ptr)?0:PORT_READ(in_MEMORY_IN_LOAD_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]); |
---|
| 792 | _speculative_access_queue [index]._store_queue_ptr_write= PORT_READ(in_MEMORY_IN_STORE_QUEUE_PTR_WRITE[internal_MEMORY_IN_PORT]); |
---|
[71] | 793 | _speculative_access_queue [index]._address = address; |
---|
| 794 | // NOTE : is operation is a load, then they are a result and must write in the register file |
---|
| 795 | _speculative_access_queue [index]._write_rd = is_operation_memory_load(operation); |
---|
[88] | 796 | _speculative_access_queue [index]._num_reg_rd = PORT_READ(in_MEMORY_IN_NUM_REG_RD [internal_MEMORY_IN_PORT]); |
---|
[71] | 797 | |
---|
| 798 | _speculative_access_queue [index]._exception = exception; |
---|
[59] | 799 | |
---|
[97] | 800 | log_printf(TRACE,Load_store_unit,FUNCTION," * index : %d",index); |
---|
[59] | 801 | } |
---|
| 802 | } |
---|
| 803 | |
---|
| 804 | //================================================================ |
---|
| 805 | // Interface "MEMORY_OUT" |
---|
| 806 | //================================================================ |
---|
| 807 | |
---|
| 808 | if (( internal_MEMORY_OUT_VAL == 1) and |
---|
[88] | 809 | (PORT_READ(in_MEMORY_OUT_ACK[0]) == 1)) |
---|
[59] | 810 | { |
---|
[101] | 811 | log_printf(TRACE,Load_store_unit,FUNCTION," * MEMORY_OUT[0] transaction"); |
---|
[71] | 812 | |
---|
[59] | 813 | switch (internal_MEMORY_OUT_SELECT_QUEUE) |
---|
| 814 | { |
---|
| 815 | case SELECT_STORE_QUEUE : |
---|
| 816 | { |
---|
| 817 | // ======================= |
---|
| 818 | // ===== STORE_QUEUE ===== |
---|
| 819 | // ======================= |
---|
| 820 | |
---|
[97] | 821 | log_printf(TRACE,Load_store_unit,FUNCTION," * store_queue [%d]",reg_STORE_QUEUE_PTR_READ); |
---|
[71] | 822 | |
---|
[59] | 823 | // Entry flush and increase the read pointer |
---|
[71] | 824 | _store_queue [reg_STORE_QUEUE_PTR_READ]._state = STORE_QUEUE_EMPTY; |
---|
[59] | 825 | |
---|
[71] | 826 | reg_STORE_QUEUE_PTR_READ = (reg_STORE_QUEUE_PTR_READ+1)%_param->_size_store_queue; |
---|
| 827 | |
---|
| 828 | break; |
---|
| 829 | } |
---|
| 830 | case SELECT_LOAD_QUEUE : |
---|
| 831 | { |
---|
| 832 | // ====================== |
---|
| 833 | // ===== LOAD_QUEUE ===== |
---|
| 834 | // ====================== |
---|
[59] | 835 | |
---|
[97] | 836 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_queue [%d]",internal_MEMORY_OUT_PTR); |
---|
[71] | 837 | |
---|
| 838 | // Entry flush and increase the read pointer |
---|
| 839 | |
---|
| 840 | _load_queue [internal_MEMORY_OUT_PTR]._state = LOAD_QUEUE_EMPTY; |
---|
| 841 | |
---|
| 842 | // reg_LOAD_QUEUE_PTR_READ = (reg_LOAD_QUEUE_PTR_READ+1)%_param->_size_load_queue; |
---|
[59] | 843 | |
---|
| 844 | break; |
---|
| 845 | } |
---|
| 846 | case SELECT_LOAD_QUEUE_SPECULATIVE : |
---|
[71] | 847 | { |
---|
[97] | 848 | log_printf(TRACE,Load_store_unit,FUNCTION," * load_queue [%d] (speculative)",internal_MEMORY_OUT_PTR); |
---|
[71] | 849 | |
---|
| 850 | _load_queue [internal_MEMORY_OUT_PTR]._state = LOAD_QUEUE_CHECK; |
---|
| 851 | // NOTE : a speculative load write in the register file. |
---|
| 852 | // if the speculation is a miss, write_rd is re set at 1. |
---|
| 853 | _load_queue [internal_MEMORY_OUT_PTR]._write_rd = 0; |
---|
[110] | 854 | |
---|
| 855 | #ifdef STATISTICS |
---|
| 856 | if (usage_is_set(_usage,USE_STATISTICS)) |
---|
| 857 | (*_stat_nb_inst_load_commit_speculative) ++; |
---|
| 858 | #endif |
---|
| 859 | |
---|
[71] | 860 | break; |
---|
| 861 | } |
---|
| 862 | |
---|
[59] | 863 | break; |
---|
| 864 | } |
---|
| 865 | } |
---|
[62] | 866 | |
---|
| 867 | //================================================================ |
---|
| 868 | // Interface "DCACHE_REQ" |
---|
| 869 | //================================================================ |
---|
[71] | 870 | bool load_queue_push = (_speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._state == SPECULATIVE_ACCESS_QUEUE_WAIT_LOAD_QUEUE); |
---|
| 871 | |
---|
[62] | 872 | if (( internal_DCACHE_REQ_VAL == 1) and |
---|
[88] | 873 | (PORT_READ(in_DCACHE_REQ_ACK[0]) == 1)) |
---|
[62] | 874 | { |
---|
[104] | 875 | log_printf(TRACE,Load_store_unit,FUNCTION," * DCACHE_REQ [0]"); |
---|
[71] | 876 | |
---|
[62] | 877 | switch (internal_DCACHE_REQ_SELECT_QUEUE) |
---|
| 878 | { |
---|
| 879 | case SELECT_STORE_QUEUE : |
---|
| 880 | { |
---|
| 881 | // ======================= |
---|
| 882 | // ===== STORE_QUEUE ===== |
---|
| 883 | // ======================= |
---|
| 884 | |
---|
| 885 | // Entry flush and increase the read pointer |
---|
| 886 | |
---|
[71] | 887 | _store_queue [reg_STORE_QUEUE_PTR_READ]._state = STORE_QUEUE_COMMIT; |
---|
[62] | 888 | |
---|
| 889 | break; |
---|
| 890 | } |
---|
[71] | 891 | case SELECT_LOAD_QUEUE_SPECULATIVE : |
---|
| 892 | { |
---|
| 893 | // ========================================= |
---|
| 894 | // ===== SELECT_LOAD_QUEUE_SPECULATIVE ===== |
---|
| 895 | // ========================================= |
---|
| 896 | |
---|
| 897 | load_queue_push = true; |
---|
| 898 | break; |
---|
| 899 | } |
---|
[62] | 900 | case SELECT_LOAD_QUEUE : |
---|
[71] | 901 | { |
---|
[110] | 902 | throw ERRORMORPHEO(FUNCTION,_("Invalid selection")); |
---|
[71] | 903 | break; |
---|
| 904 | } |
---|
| 905 | |
---|
[62] | 906 | break; |
---|
| 907 | } |
---|
| 908 | } |
---|
| 909 | |
---|
[71] | 910 | if (load_queue_push) |
---|
| 911 | { |
---|
| 912 | Tlsq_ptr_t ptr_write = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._load_queue_ptr_write; |
---|
| 913 | Toperation_t operation = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._operation; |
---|
| 914 | Texception_t exception = _speculative_access_queue[internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._exception; |
---|
| 915 | bool have_exception = (exception != EXCEPTION_MEMORY_NONE); |
---|
| 916 | |
---|
| 917 | if (have_exception) |
---|
| 918 | _load_queue [ptr_write]._state = LOAD_QUEUE_COMMIT; |
---|
| 919 | else |
---|
| 920 | { |
---|
| 921 | if (have_dcache_rsp(operation)) |
---|
| 922 | { |
---|
| 923 | // load and synchronisation |
---|
| 924 | if (must_check(operation)) |
---|
| 925 | { |
---|
| 926 | // load |
---|
| 927 | _load_queue [ptr_write]._state = LOAD_QUEUE_WAIT_CHECK; |
---|
| 928 | } |
---|
| 929 | else |
---|
| 930 | { |
---|
| 931 | // synchronisation |
---|
| 932 | _load_queue [ptr_write]._state = LOAD_QUEUE_WAIT; |
---|
| 933 | } |
---|
| 934 | } |
---|
| 935 | else |
---|
| 936 | { |
---|
| 937 | // lock, prefecth, flush and invalidate |
---|
| 938 | _load_queue [ptr_write]._state = LOAD_QUEUE_COMMIT; |
---|
| 939 | } |
---|
| 940 | } |
---|
| 941 | |
---|
| 942 | Tdcache_address_t address = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._address; |
---|
| 943 | Tdcache_address_t address_lsb = (address & _param->_mask_address_lsb); |
---|
[104] | 944 | Tdcache_address_t check_hit_byte = gen_mask_not<Tdcache_address_t>(address_lsb+(memory_size(operation)>>3)-1,address_lsb) & _param->_mask_check_hit_byte; |
---|
| 945 | |
---|
| 946 | log_printf(TRACE,Load_store_unit,FUNCTION," * address : 0x%.8x", address); |
---|
| 947 | log_printf(TRACE,Load_store_unit,FUNCTION," * address_lsb : 0x%.8x", address_lsb); |
---|
| 948 | log_printf(TRACE,Load_store_unit,FUNCTION," * operation : %d", operation); |
---|
| 949 | log_printf(TRACE,Load_store_unit,FUNCTION," * memory_size : %d", memory_size(operation)); |
---|
| 950 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : 0x%x", check_hit_byte); |
---|
| 951 | |
---|
| 952 | _load_queue [ptr_write]._context_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._context_id; |
---|
| 953 | _load_queue [ptr_write]._front_end_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._front_end_id; |
---|
| 954 | _load_queue [ptr_write]._ooo_engine_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._ooo_engine_id; |
---|
| 955 | _load_queue [ptr_write]._packet_id = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._packet_id; |
---|
| 956 | _load_queue [ptr_write]._operation = operation; |
---|
[71] | 957 | _load_queue [ptr_write]._store_queue_ptr_write = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._store_queue_ptr_write; |
---|
[104] | 958 | _load_queue [ptr_write]._address = address; |
---|
| 959 | _load_queue [ptr_write]._check_hit_byte = check_hit_byte; |
---|
| 960 | _load_queue [ptr_write]._check_hit = 0; |
---|
| 961 | _load_queue [ptr_write]._shift = address_lsb<<3;// *8 |
---|
| 962 | _load_queue [ptr_write]._is_load_signed = is_operation_memory_load_signed(operation); |
---|
| 963 | _load_queue [ptr_write]._access_size = memory_size(operation); |
---|
[71] | 964 | // NOTE : if have an exception, must write in register, because a depend instruction wait the load data. |
---|
[104] | 965 | _load_queue [ptr_write]._write_rd = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._write_rd ; |
---|
[71] | 966 | |
---|
[104] | 967 | _load_queue [ptr_write]._num_reg_rd = _speculative_access_queue [internal_SPECULATIVE_ACCESS_QUEUE_PTR_READ]._num_reg_rd ; |
---|
| 968 | _load_queue [ptr_write]._exception = exception; |
---|
| 969 | _load_queue [ptr_write]._rdata = address; // to the exception |
---|
[71] | 970 | |
---|
[97] | 971 | log_printf(TRACE,Load_store_unit,FUNCTION," * speculative_access_queue"); |
---|
| 972 | log_printf(TRACE,Load_store_unit,FUNCTION," * POP[%d]",(*_speculative_access_queue_control)[0]); |
---|
[71] | 973 | |
---|
| 974 | _speculative_access_queue [(*_speculative_access_queue_control)[0]]._state = SPECULATIVE_ACCESS_QUEUE_EMPTY; |
---|
| 975 | |
---|
| 976 | _speculative_access_queue_control->pop(); |
---|
| 977 | |
---|
[110] | 978 | #ifdef STATISTICS |
---|
| 979 | if (usage_is_set(_usage,USE_STATISTICS)) |
---|
| 980 | (*_stat_nb_inst_load) ++; |
---|
| 981 | #endif |
---|
| 982 | } |
---|
| 983 | |
---|
[71] | 984 | //================================================================ |
---|
| 985 | // Interface "DCACHE_RSP" |
---|
| 986 | //================================================================ |
---|
[88] | 987 | if ((PORT_READ(in_DCACHE_RSP_VAL[0])== 1) and |
---|
[71] | 988 | ( internal_DCACHE_RSP_ACK == 1)) |
---|
| 989 | { |
---|
[101] | 990 | log_printf(TRACE,Load_store_unit,FUNCTION," * DCACHE_RSP [0]"); |
---|
[71] | 991 | |
---|
| 992 | // don't use context_id : because there are one queue for all thread |
---|
[88] | 993 | //Tcontext_t context_id = PORT_READ(in_DCACHE_RSP_CONTEXT_ID[0]); |
---|
| 994 | Tpacket_t packet_id = PORT_READ(in_DCACHE_RSP_PACKET_ID [0]); |
---|
| 995 | Tdcache_data_t rdata = PORT_READ(in_DCACHE_RSP_RDATA [0]); |
---|
| 996 | Tdcache_error_t error = PORT_READ(in_DCACHE_RSP_ERROR [0]); |
---|
[71] | 997 | |
---|
[101] | 998 | log_printf(TRACE,Load_store_unit,FUNCTION," * original packet_id : %d" , packet_id); |
---|
[106] | 999 | log_printf(TRACE,Load_store_unit,FUNCTION," * packet_id : %d" , packet_id>>1); |
---|
[101] | 1000 | log_printf(TRACE,Load_store_unit,FUNCTION," * rdata : %.8x", rdata); |
---|
| 1001 | log_printf(TRACE,Load_store_unit,FUNCTION," * error : %d" , error); |
---|
[71] | 1002 | |
---|
| 1003 | if (DCACHE_RSP_IS_LOAD(packet_id) == 1) |
---|
| 1004 | { |
---|
| 1005 | packet_id >>= 1; |
---|
| 1006 | |
---|
[106] | 1007 | log_printf(TRACE,Load_store_unit,FUNCTION," * packet is a LOAD"); |
---|
[71] | 1008 | |
---|
| 1009 | #ifdef DEBUG_TEST |
---|
| 1010 | if (not have_dcache_rsp(_load_queue [packet_id]._operation)) |
---|
[110] | 1011 | throw ERRORMORPHEO(FUNCTION,_("Receive of respons, but the corresponding operation don't wait a respons.")); |
---|
[71] | 1012 | #endif |
---|
[101] | 1013 | |
---|
[104] | 1014 | Tdcache_data_t data = _load_queue [packet_id]._rdata; |
---|
| 1015 | |
---|
| 1016 | log_printf(TRACE,Load_store_unit,FUNCTION," * data construction"); |
---|
| 1017 | log_printf(TRACE,Load_store_unit,FUNCTION," * data from cache : 0x%.8x",rdata); |
---|
| 1018 | log_printf(TRACE,Load_store_unit,FUNCTION," * data (before) : 0x%.8x", data); |
---|
| 1019 | log_printf(TRACE,Load_store_unit,FUNCTION," * check_hit_byte : 0x%x" ,_load_queue [packet_id]._check_hit_byte); |
---|
| 1020 | for (uint32_t i=0;i<(_param->_size_general_data>>3)/*8*/; ++i) |
---|
| 1021 | // Test if this byte has been checked |
---|
| 1022 | if ((_load_queue [packet_id]._check_hit_byte & (1<<i)) == 0) |
---|
| 1023 | { |
---|
| 1024 | log_printf(TRACE,Load_store_unit,FUNCTION," * no previous check ]%d:%d]",(i+1)<<3,i<<3); |
---|
| 1025 | data = insert<Tdcache_data_t>(data,rdata,((i+1)<<3)-1,i<<3); |
---|
| 1026 | } |
---|
| 1027 | log_printf(TRACE,Load_store_unit,FUNCTION," * data (after) : 0x%.8x", data); |
---|
| 1028 | |
---|
| 1029 | _load_queue [packet_id]._rdata = data; |
---|
[71] | 1030 | |
---|
[72] | 1031 | if (error != DCACHE_ERROR_NONE) |
---|
[71] | 1032 | { |
---|
[97] | 1033 | log_printf(TRACE,Load_store_unit,FUNCTION," * have a bus error !!!"); |
---|
[71] | 1034 | |
---|
| 1035 | _load_queue [packet_id]._exception = EXCEPTION_MEMORY_BUS_ERROR; |
---|
| 1036 | _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT; |
---|
| 1037 | } |
---|
| 1038 | else |
---|
| 1039 | { |
---|
[97] | 1040 | log_printf(TRACE,Load_store_unit,FUNCTION," * have no bus error."); |
---|
[106] | 1041 | log_printf(TRACE,Load_store_unit,FUNCTION," * previous state : %s",toString(_load_queue [packet_id]._state).c_str()); |
---|
[71] | 1042 | |
---|
| 1043 | // FIXME : convention : if bus error, the cache return the fautive address ! |
---|
| 1044 | // But, the load's address is aligned ! |
---|
[101] | 1045 | |
---|
[71] | 1046 | switch (_load_queue [packet_id]._state) |
---|
| 1047 | { |
---|
| 1048 | case LOAD_QUEUE_WAIT_CHECK : _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT_CHECK; break; |
---|
| 1049 | case LOAD_QUEUE_WAIT : _load_queue [packet_id]._state = LOAD_QUEUE_COMMIT ; break; |
---|
[110] | 1050 | default : throw ERRORMORPHEO(FUNCTION,_("Illegal state (dcache_rsp).")); break; |
---|
[71] | 1051 | } |
---|
| 1052 | } |
---|
| 1053 | } |
---|
| 1054 | else |
---|
| 1055 | { |
---|
[97] | 1056 | log_printf(TRACE,Load_store_unit,FUNCTION," * packet is a STORE"); |
---|
[71] | 1057 | |
---|
| 1058 | // TODO : les stores ne génére pas de réponse sauf quand c'est un bus error !!! |
---|
| 1059 | throw ERRORMORPHEO(FUNCTION,_("dcache_rsp : no respons to a write. (TODO : manage bus error to the store operation.)")); |
---|
| 1060 | } |
---|
| 1061 | |
---|
| 1062 | } |
---|
| 1063 | |
---|
| 1064 | // this register is to manage the priority of check -> Round robin |
---|
| 1065 | reg_LOAD_QUEUE_CHECK_PRIORITY = (reg_LOAD_QUEUE_CHECK_PRIORITY+1)%_param->_size_load_queue; |
---|
| 1066 | |
---|
| 1067 | |
---|
[88] | 1068 | #if defined(DEBUG) and (DEBUG>=DEBUG_TRACE) |
---|
[62] | 1069 | // ***** dump store queue |
---|
[97] | 1070 | log_printf(TRACE,Load_store_unit,FUNCTION," * Dump STORE_QUEUE"); |
---|
| 1071 | log_printf(TRACE,Load_store_unit,FUNCTION," * ptr_read : %d",reg_STORE_QUEUE_PTR_READ); |
---|
[62] | 1072 | |
---|
| 1073 | for (uint32_t i=0; i<_param->_size_store_queue; i++) |
---|
| 1074 | { |
---|
[71] | 1075 | uint32_t j = (reg_STORE_QUEUE_PTR_READ+i)%_param->_size_store_queue; |
---|
[97] | 1076 | |
---|
| 1077 | log_printf(TRACE,Load_store_unit,FUNCTION," [%.4d] %.4d %.4d %.4d, %.4d, %.4d, %.4d, %.8x %.8x, %.2d, %s", |
---|
| 1078 | j, |
---|
| 1079 | _store_queue[j]._context_id , |
---|
| 1080 | _store_queue[j]._front_end_id , |
---|
| 1081 | _store_queue[j]._ooo_engine_id , |
---|
| 1082 | _store_queue[j]._packet_id , |
---|
| 1083 | _store_queue[j]._operation , |
---|
| 1084 | _store_queue[j]._load_queue_ptr_write, |
---|
| 1085 | _store_queue[j]._address , |
---|
| 1086 | _store_queue[j]._wdata , |
---|
| 1087 | //_store_queue[j]._write_rd , |
---|
| 1088 | //_store_queue[j]._num_reg_rd , |
---|
| 1089 | _store_queue[j]._exception , |
---|
| 1090 | toString(_store_queue[j]._state).c_str()); |
---|
[62] | 1091 | } |
---|
[71] | 1092 | |
---|
| 1093 | // ***** dump speculative_access queue |
---|
[97] | 1094 | log_printf(TRACE,Load_store_unit,FUNCTION," * Dump SPECULATIVE_ACCESS_QUEUE"); |
---|
[71] | 1095 | |
---|
| 1096 | for (uint32_t i=0; i<_param->_size_speculative_access_queue; i++) |
---|
| 1097 | { |
---|
| 1098 | uint32_t j = (*_speculative_access_queue_control)[i]; |
---|
[97] | 1099 | |
---|
[101] | 1100 | log_printf(TRACE,Load_store_unit,FUNCTION," [%.4d] %.4d %.4d %.4d, %.4d, %.4d, %.4d %.4d, %.8x, %.1d %.4d, %.2d, %s", |
---|
[97] | 1101 | j, |
---|
| 1102 | _speculative_access_queue[j]._context_id , |
---|
| 1103 | _speculative_access_queue[j]._front_end_id , |
---|
| 1104 | _speculative_access_queue[j]._ooo_engine_id , |
---|
| 1105 | _speculative_access_queue[j]._packet_id , |
---|
| 1106 | _speculative_access_queue[j]._operation , |
---|
| 1107 | _speculative_access_queue[j]._load_queue_ptr_write, |
---|
| 1108 | _speculative_access_queue[j]._store_queue_ptr_write, |
---|
| 1109 | _speculative_access_queue[j]._address , |
---|
| 1110 | _speculative_access_queue[j]._write_rd , |
---|
| 1111 | _speculative_access_queue[j]._num_reg_rd , |
---|
| 1112 | _speculative_access_queue[j]._exception , |
---|
| 1113 | toString(_speculative_access_queue[j]._state).c_str()); |
---|
[71] | 1114 | } |
---|
| 1115 | |
---|
| 1116 | // ***** dump load queue |
---|
[97] | 1117 | log_printf(TRACE,Load_store_unit,FUNCTION," * Dump LOAD_QUEUE"); |
---|
| 1118 | log_printf(TRACE,Load_store_unit,FUNCTION," * ptr_read_check_priority : %d",reg_LOAD_QUEUE_CHECK_PRIORITY); |
---|
[71] | 1119 | |
---|
| 1120 | for (uint32_t i=0; i<_param->_size_load_queue; i++) |
---|
| 1121 | { |
---|
| 1122 | uint32_t j = i; |
---|
[97] | 1123 | |
---|
[101] | 1124 | log_printf(TRACE,Load_store_unit,FUNCTION," [%.4d] %.4d %.4d %.4d, %.4d, %.4d, %.4d, %.8x %.1x %.1d %.2d %.1d %.2d, %.8x, %.1d %.4d, %.2d, %s", |
---|
[97] | 1125 | j, |
---|
| 1126 | _load_queue[j]._context_id , |
---|
| 1127 | _load_queue[j]._front_end_id , |
---|
| 1128 | _load_queue[j]._ooo_engine_id , |
---|
| 1129 | _load_queue[j]._packet_id , |
---|
| 1130 | _load_queue[j]._operation , |
---|
| 1131 | _load_queue[j]._store_queue_ptr_write, |
---|
| 1132 | _load_queue[j]._address , |
---|
| 1133 | _load_queue[j]._check_hit_byte , |
---|
| 1134 | _load_queue[j]._check_hit , |
---|
| 1135 | _load_queue[j]._shift , |
---|
| 1136 | _load_queue[j]._is_load_signed , |
---|
| 1137 | _load_queue[j]._access_size , |
---|
| 1138 | _load_queue[j]._rdata , |
---|
| 1139 | _load_queue[j]._write_rd , |
---|
| 1140 | _load_queue[j]._num_reg_rd , |
---|
| 1141 | _load_queue[j]._exception , |
---|
| 1142 | toString(_load_queue[j]._state).c_str()); |
---|
[71] | 1143 | } |
---|
[62] | 1144 | #endif |
---|
[71] | 1145 | |
---|
| 1146 | #ifdef STATISTICS |
---|
[88] | 1147 | if (usage_is_set(_usage,USE_STATISTICS)) |
---|
| 1148 | { |
---|
| 1149 | for (uint32_t i=0; i<_param->_size_store_queue; i++) |
---|
| 1150 | if (_store_queue[i]._state != STORE_QUEUE_EMPTY) |
---|
| 1151 | (*_stat_use_store_queue) ++; |
---|
| 1152 | for (uint32_t i=0; i<_param->_size_speculative_access_queue; i++) |
---|
| 1153 | if (_speculative_access_queue[i]._state != SPECULATIVE_ACCESS_QUEUE_EMPTY) |
---|
| 1154 | (*_stat_use_speculative_access_queue) ++; |
---|
| 1155 | for (uint32_t i=0; i<_param->_size_load_queue; i++) |
---|
| 1156 | if (_load_queue[i]._state != LOAD_QUEUE_EMPTY) |
---|
| 1157 | (*_stat_use_load_queue) ++; |
---|
| 1158 | } |
---|
[71] | 1159 | #endif |
---|
[59] | 1160 | } |
---|
| 1161 | |
---|
[97] | 1162 | log_end(Load_store_unit,FUNCTION); |
---|
[59] | 1163 | }; |
---|
| 1164 | |
---|
| 1165 | }; // end namespace load_store_unit |
---|
| 1166 | }; // end namespace execute_unit |
---|
| 1167 | }; // end namespace multi_execute_unit |
---|
| 1168 | }; // end namespace execute_loop |
---|
| 1169 | }; // end namespace multi_execute_loop |
---|
| 1170 | }; // end namespace core |
---|
| 1171 | |
---|
| 1172 | }; // end namespace behavioural |
---|
| 1173 | }; // end namespace morpheo |
---|
| 1174 | #endif |
---|