Changeset 435 for trunk/kernel/mm
- Timestamp: Feb 20, 2018, 5:32:17 PM (7 years ago)
- Location: trunk/kernel/mm
- Files: 4 edited
Legend:
- Unmodified
- Added
- Removed
trunk/kernel/mm/kcm.c
(r433 → r435): rename of the debug switch CONFIG_DEBUG_KCM_ALLOC to CONFIG_DEBUG_KCM.

@@ lines 48–54 @@
 {
-#if CONFIG_DEBUG_KCM_ALLOC
+#if CONFIG_DEBUG_KCM
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KCM_ALLOC < cycle )
+if( CONFIG_DEBUG_KCM < cycle )
 printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,
…
@@ lines 85–91 @@
 + (index * kcm->block_size) );
-#if CONFIG_DEBUG_KCM_ALLOC
+#if CONFIG_DEBUG_KCM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KCM_ALLOC < cycle )
+if( CONFIG_DEBUG_KCM < cycle )
 printk("\n[DBG] %s : thread %x exit / type %s / ptr %p / page %x / count %d\n",
 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
trunk/kernel/mm/kmem.c
(r433 → r435): replacement of kmem_dmsg() statements and the CONFIG_DEBUG_KMEM_ALLOC blocks with unified CONFIG_DEBUG_KMEM conditional printk blocks.

@@ line 145 @@
 assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );
-kmem_dmsg("\n[DBG] %s : enters / KCM type %s missing in cluster %x\n",
-          __FUNCTION__ , kmem_type_str( type ) , local_cxy );
+#if CONFIG_DEBUG_KMEM
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
+#endif
 cluster_t * cluster = LOCAL_CLUSTER;
…
@@ line 169 @@
 hal_fence();
-kmem_dmsg("\n[DBG] %s : exit / KCM type %s created in cluster %x\n",
-          __FUNCTION__ , kmem_type_str( type ) , local_cxy );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, cycle );
+#endif
 return 0;
…
@@ line 192 @@
 assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
-kmem_dmsg("\n[DBG] %s : enters in cluster %x for type %s\n",
-          __FUNCTION__ , local_cxy , kmem_type_str( type ) );
+#if CONFIG_DEBUG_KMEM
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );
+#endif
 // analyse request type
 if( type == KMEM_PAGE ) // PPM allocator
 {
-#if CONFIG_DEBUG_KMEM_ALLOC
-if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
-printk("\n[DBG] in %s : thread %x enter for %d page(s)\n",
-       __FUNCTION__ , CURRENT_THREAD , 1<<size );
-#endif
 // allocate the number of requested pages
 ptr = (void *)ppm_alloc_pages( size );
…
 if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
-kmem_dmsg("\n[DBG] %s : exit in cluster %x for type %s / page = %x / base = %x\n",
-          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
-          (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
-#if CONFIG_DEBUG_KMEM_ALLOC
-if( CONFIG_DEBUG_KMEM_ALLOC < (uint32_t)hal_get_cycles() )
-printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x\n",
-       __FUNCTION__ , CURRENT_THREAD , 1<<size , ppm_page2ppn( XPTR( local_cxy , ptr ) ) );
-#endif
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );
+#endif
…
 if( flags & AF_ZERO ) memset( ptr , 0 , size );
-kmem_dmsg("\n[DBG] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
-          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
-          (intptr_t)ptr , req->size );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );
+#endif
 }
 else // KCM allocator
…
 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
-kmem_dmsg("\n[DBG] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
-          __FUNCTION__, local_cxy , kmem_type_str( type ) ,
-          (intptr_t)ptr , kmem_type_size( type ) );
+#if CONFIG_DEBUG_KMEM
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_KMEM < cycle )
+printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
+       kmem_type_size(type), cycle );
+#endif
 }
trunk/kernel/mm/mapper.c
(r408 → r435): replacement of mapper_dmsg() statements with CONFIG_DEBUG_MAPPER_GET_PAGE / CONFIG_DEBUG_MAPPER_MOVE_USER / CONFIG_DEBUG_MAPPER_MOVE_KERNEL conditional printk blocks.

@@ line 143 @@
 error_t error;
-mapper_dmsg("\n[DBG] %s : core[%x,%d] enters for page %d / mapper %x\n",
-            __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , index , mapper );
+#if CONFIG_DEBUG_MAPPER_GET_PAGE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
+#endif
 thread_t * this = CURRENT_THREAD;
…
-mapper_dmsg("\n[DBG] %s : core[%x,%d] missing page => load from device\n",
-            __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );
+#if (CONFIG_DEBUG_MAPPER_GET_PAGE & 1)
+if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
+#endif
 // allocate one page from PPM
 req.type = KMEM_PAGE;
…
 // reset the page INLOAD flag to make the page available to all readers
 page_clear_flag( page , PG_INLOAD );
-mapper_dmsg("\n[DBG] %s : missing page loaded / ppn = %x\n",
-            __FUNCTION__ , ppm_page2ppn(XPTR(local_cxy,page)) );
 }
 else if( page_is_flag( page , PG_INLOAD ) ) // page is loaded by another thread
…
-mapper_dmsg("\n[DBG] %s : exit for page %d / mapper %x / page_desc = %x\n",
-            __FUNCTION__ , index , mapper , page );
+#if CONFIG_DEBUG_MAPPER_GET_PAGE
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
+       __FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
+#endif
 return page;
…
 uint8_t * buf_ptr; // current buffer address
-mapper_dmsg("\n[DBG] %s : enters / to_buf = %d / buffer = %x\n",
-            __FUNCTION__ , to_buffer , buffer );
+#if CONFIG_DEBUG_MAPPER_MOVE_USER
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
+#endif
 // compute offsets of first and last bytes in file
…
 else page_count = CONFIG_PPM_PAGE_SIZE;
-mapper_dmsg("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
-            __FUNCTION__ , index , page_offset , page_count );
+#if (CONFIG_DEBUG_MAPPER_MOVE_USER & 1)
+if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
+       __FUNCTION__ , index , page_offset , page_count );
+#endif
 // get page descriptor
…
 buf_ptr = (uint8_t *)buffer + done;
-mapper_dmsg("\n[DBG] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",
-            __FUNCTION__ , index , buf_ptr , map_ptr );
 // move fragment
 if( to_buffer )
…
-mapper_dmsg("\n[DBG] %s : exit for buffer %x\n",
-            __FUNCTION__, buffer );
+#if CONFIG_DEBUG_MAPPER_MOVE_USER
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
+#endif
 return 0;
…
 uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
-mapper_dmsg("\n[DBG] %s : core[%x,%d] / to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n",
-            __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, to_buffer, buffer_cxy, buffer_ptr, size );
+#if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
+#endif
 // compute offsets of first and last bytes in file
…
 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT;
-mapper_dmsg("\n[DBG] %s : core[%x,%d] / first_page = %d / last_page = %d\n",
-            __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, first, last );
+#if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
+#endif
 // compute source and destination clusters
…
 else page_count = CONFIG_PPM_PAGE_SIZE;
-mapper_dmsg("\n[DBG] %s : core[%x;%d] / page_index = %d / offset = %d / bytes = %d\n",
-            __FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, index, page_offset, page_count );
+#if (CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
+       __FUNCTION__ , index , page_offset , page_count );
+#endif
 // get page descriptor
…
-mapper_dmsg("\n[DBG] %s : core_cxy[%x,%d] / exit / buf_cxy = %x / buf_ptr = %x / size = %x\n",
-            __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, buffer_cxy, buffer_ptr, size );
+#if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
+#endif
 return 0;
trunk/kernel/mm/vmm.c
(r433 → r435): fix of an inverted comparison in a debug condition ('>' changed to '<', so the message prints after the threshold cycle instead of before it), plus new enter/exit debug blocks in two further functions.

@@ line 1643 @@
 #if CONFIG_DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE > cycle )
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow = %d / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );
…
@@ line 1800 @@
 error_t error;
+#if CONFIG_DEBUG_VMM_GET_PTE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
 // get reference process cluster and local pointer
 cxy_t ref_cxy = GET_CXY( process->ref_xp );
…
 }
+#if CONFIG_DEBUG_VMM_GET_PTE
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
 return error;
…
@@ line 1845 @@
 error_t error;
+#if CONFIG_DEBUG_VMM_GET_PTE
+uint32_t cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
 // get reference process cluster and local pointer
…
 }
+#if CONFIG_DEBUG_VMM_GET_PTE
+cycle = (uint32_t)hal_get_cycles();
+if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
+       __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
+#endif
 return error;
Note: See TracChangeset for help on using the changeset viewer.