Changeset 438 for trunk/kernel/mm
- Timestamp: Apr 4, 2018, 2:49:02 PM (7 years ago)
- Location: trunk/kernel/mm
- Files: 5 edited
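All five edits make the same mechanical change: the per-module debug switches are renamed from CONFIG_DEBUG_* to DEBUG_*, and a few traces in vmm.c are additionally moved or reworded (see the vmm.c hunks below). The instrumentation convention itself is unchanged. The sketch below restates that convention as it appears throughout the diffs; it is a minimal illustration, not kernel code: DEBUG_FOO and foo_function() are hypothetical names, and the macro value would normally come from a configuration header. A zero value compiles the traces out via #if, a non-zero value N enables them only after cycle N, and an odd value additionally enables the (DEBUG_FOO & 1) verbose blocks, which can safely reuse the cycle variable declared by the main block since an odd value is necessarily non-zero.

#define DEBUG_FOO  10001   /* hypothetical: 0 = off / N = start tracing at cycle N /
                              odd N also enables the verbose (& 1) blocks */

void foo_function( void )
{

#if DEBUG_FOO
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_FOO < cycle )
printk("\n[DBG] %s : thread %x enter / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , cycle );
#endif

    /* ... useful work ... */

#if( DEBUG_FOO & 1 )
if( DEBUG_FOO < cycle )
printk("\n[DBG] %s : extra-verbose detail\n", __FUNCTION__ );
#endif

#if DEBUG_FOO
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_FOO < cycle )
printk("\n[DBG] %s : thread %x exit / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , cycle );
#endif

}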
trunk/kernel/mm/kcm.c
--- kcm.c (r437)
+++ kcm.c (r438)

@@ -48 +48 @@
 {
 
-#if CONFIG_DEBUG_KCM
+#if DEBUG_KCM
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KCM < cycle )
+if( DEBUG_KCM < cycle )
 printk("\n[DBG] %s : thread %x enters for %s / page %x / count %d / active %d\n",
 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) ,

@@ -85 +85 @@
 + (index * kcm->block_size) );
 
-#if CONFIG_DEBUG_KCM
+#if DEBUG_KCM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KCM < cycle )
+if( DEBUG_KCM < cycle )
 printk("\n[DBG] %s : thread %x exit / type %s / ptr %p / page %x / count %d\n",
 __FUNCTION__ , CURRENT_THREAD , kmem_type_str( kcm->type ) , ptr ,
trunk/kernel/mm/kmem.c
--- kmem.c (r435)
+++ kmem.c (r438)

@@ -145 +145 @@
 assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );
 
-#if CONFIG_DEBUG_KMEM
+#if DEBUG_KMEM
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KMEM < cycle )
+if( DEBUG_KMEM < cycle )
 printk("\n[DBG] %s : thread %x enter / KCM type %s missing in cluster %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );

@@ -173 +173 @@
 hal_fence();
 
-#if CONFIG_DEBUG_KMEM
+#if DEBUG_KMEM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KMEM < cycle )
+if( DEBUG_KMEM < cycle )
 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, cycle );

@@ -200 +200 @@
 assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
 
-#if CONFIG_DEBUG_KMEM
+#if DEBUG_KMEM
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KMEM < cycle )
+if( DEBUG_KMEM < cycle )
 printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );

@@ -222 +222 @@
 if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
 
-#if CONFIG_DEBUG_KMEM
+#if DEBUG_KMEM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KMEM < cycle )
+if( DEBUG_KMEM < cycle )
 printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle );

@@ -244 +244 @@
 if( flags & AF_ZERO ) memset( ptr , 0 , size );
 
-#if CONFIG_DEBUG_KMEM
+#if DEBUG_KMEM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KMEM < cycle )
+if( DEBUG_KMEM < cycle )
 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle );

@@ -275 +275 @@
 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
 
-#if CONFIG_DEBUG_KMEM
+#if DEBUG_KMEM
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_KMEM < cycle )
+if( DEBUG_KMEM < cycle )
 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,
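The three exit traces above correspond to the three allocation paths that kmem_alloc() can take, and the surrounding context lines show how each honors the AF_ZERO flag: whole physical pages are cleared with page_zero(), variable-sized buffers with memset() on the requested size, and fixed-size typed objects with memset() on kmem_type_size(type). Below is a hedged sketch of that dispatch; KMEM_PAGE and KMEM_GENERIC are assumed names for the two reserved type values that the assert in the first hunk excludes from the KCM path.

/* Assumed sketch of the zeroing dispatch visible in the kmem.c context
 * lines; KMEM_PAGE / KMEM_GENERIC are hypothetical names for types 0 / 1. */
if( type == KMEM_PAGE )          /* 1<<size physical pages  */
{
    if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
}
else if( type == KMEM_GENERIC )  /* size bytes              */
{
    if( flags & AF_ZERO ) memset( ptr , 0 , size );
}
else                             /* fixed-size typed object */
{
    if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
}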
trunk/kernel/mm/mapper.c
--- mapper.c (r435)
+++ mapper.c (r438)

@@ -143 +143 @@
 error_t error;
 
-#if CONFIG_DEBUG_MAPPER_GET_PAGE
+#if DEBUG_MAPPER_GET_PAGE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+if( DEBUG_MAPPER_GET_PAGE < cycle )
 printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );

@@ -175 +175 @@
 {
 
-#if ( CONFIG_DEBUG_MAPPER_GET_PAGE & 1)
-if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+#if (DEBUG_MAPPER_GET_PAGE & 1)
+if( DEBUG_MAPPER_GET_PAGE < cycle )
 printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
 #endif

@@ -257 +257 @@
 }
 
-#if CONFIG_DEBUG_MAPPER_GET_PAGE
+#if DEBUG_MAPPER_GET_PAGE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_MAPPER_GET_PAGE < cycle )
+if( DEBUG_MAPPER_GET_PAGE < cycle )
 printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );

@@ -317 +317 @@
 uint8_t * buf_ptr; // current buffer address
 
-#if CONFIG_DEBUG_MAPPER_MOVE_USER
+#if DEBUG_MAPPER_MOVE_USER
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+if( DEBUG_MAPPER_MOVE_USER < cycle )
 printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );

@@ -347 +347 @@
 else page_count = CONFIG_PPM_PAGE_SIZE;
 
-#if ( CONFIG_DEBUG_MAPPER_MOVE_USER & 1)
-if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+#if (DEBUG_MAPPER_MOVE_USER & 1)
+if( DEBUG_MAPPER_MOVE_USER < cycle )
 printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
 __FUNCTION__ , index , page_offset , page_count );

@@ -379 +379 @@
 }
 
-#if CONFIG_DEBUG_MAPPER_MOVE_USER
+#if DEBUG_MAPPER_MOVE_USER
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_MAPPER_MOVE_USER < cycle )
+if( DEBUG_MAPPER_MOVE_USER < cycle )
 printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );

@@ -412 +412 @@
 uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
 
-#if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
+#if DEBUG_MAPPER_MOVE_KERNEL
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
 printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );

@@ -427 +427 @@
 uint32_t last = max_byte >> CONFIG_PPM_PAGE_SHIFT;
 
-#if ( CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
-if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
+if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
 printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
 #endif

@@ -459 +459 @@
 else page_count = CONFIG_PPM_PAGE_SIZE;
 
-#if ( CONFIG_DEBUG_MAPPER_MOVE_KERNEL & 1)
-if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
+if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
 printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
 __FUNCTION__ , index , page_offset , page_count );

@@ -494 +494 @@
 }
 
-#if CONFIG_DEBUG_MAPPER_MOVE_KERNEL
+#if DEBUG_MAPPER_MOVE_KERNEL
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_MAPPER_MOVE_KERNEL < cycle )
+if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
 printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
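Both mapper_move_user() and mapper_move_kernel(), traced above, walk the mapper page by page: the byte interval is converted to first/last page indexes with CONFIG_PPM_PAGE_SHIFT, and each iteration computes the offset and byte count of the fragment to move within the current page (the index / offset / count values printed by the verbose traces). A minimal sketch of that fragment arithmetic follows; min_byte is an assumed name matching the max_byte seen in the context lines, the sketch assumes max_byte is an exclusive bound, and page-aligned corner cases are elided.

/* Sketch of the per-page fragment split used by the mapper move functions. */
uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;   /* first page index */
uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;   /* last page index  */

for( uint32_t index = first ; index <= last ; index++ )
{
    /* start offset inside this page: non-zero only on the first page */
    uint32_t page_offset = (index == first) ? (min_byte & (CONFIG_PPM_PAGE_SIZE - 1)) : 0;

    /* bytes to move from this page: partial on the last page, full otherwise */
    uint32_t page_count;
    if( index == last ) page_count = (max_byte & (CONFIG_PPM_PAGE_SIZE - 1)) - page_offset;
    else                page_count = CONFIG_PPM_PAGE_SIZE - page_offset;

    /* ... copy page_count bytes starting at page_offset within page <index> ... */
}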
trunk/kernel/mm/ppm.c
--- ppm.c (r437)
+++ ppm.c (r438)

@@ -201 +201 @@
 uint32_t current_size;
 
-#if CONFIG_DEBUG_PPM_ALLOC_PAGES
+#if DEBUG_PPM_ALLOC_PAGES
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+if( DEBUG_PPM_ALLOC_PAGES < cycle )
 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );
 #endif
 
-#if( CONFIG_DEBUG_PPM_ALLOC_PAGES & 0x1)
-if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+#if( DEBUG_PPM_ALLOC_PAGES & 0x1)
+if( DEBUG_PPM_ALLOC_PAGES < cycle )
 ppm_print();
 #endif

@@ -239 +239 @@
 spinlock_unlock( &ppm->free_lock );
 
-#if CONFIG_DEBUG_PPM_ALLOC_PAGES
+#if DEBUG_PPM_ALLOC_PAGES
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+if( DEBUG_PPM_ALLOC_PAGES < cycle )
 printk("\n[DBG] in %s : thread %x cannot allocate %d page(s) at cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , 1<<order, cycle );

@@ -275 +275 @@
 spinlock_unlock( &ppm->free_lock );
 
-#if CONFIG_DEBUG_PPM_ALLOC_PAGES
+#if DEBUG_PPM_ALLOC_PAGES
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PPM_ALLOC_PAGES < cycle )
+if( DEBUG_PPM_ALLOC_PAGES < cycle )
 printk("\n[DBG] in %s : thread %x exit / %d page(s) allocated / ppn = %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );

@@ -292 +292 @@
 ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 
-#if CONFIG_DEBUG_PPM_FREE_PAGES
+#if DEBUG_PPM_FREE_PAGES
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
+if( DEBUG_PPM_FREE_PAGES < cycle )
 printk("\n[DBG] in %s : thread %x enter for %d page(s) / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , 1<<page->order , cycle );
 #endif
 
-#if( CONFIG_DEBUG_PPM_FREE_PAGES & 0x1)
-if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
+#if( DEBUG_PPM_FREE_PAGES & 0x1)
+if( DEBUG_PPM_FREE_PAGES < cycle )
 ppm_print();
 #endif

@@ -312 +312 @@
 spinlock_unlock( &ppm->free_lock );
 
-#if CONFIG_DEBUG_PPM_FREE_PAGES
+#if DEBUG_PPM_FREE_PAGES
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_PPM_FREE_PAGES < cycle )
+if( DEBUG_PPM_FREE_PAGES < cycle )
 printk("\n[DBG] in %s : thread %x exit / %d page(s) released / ppn = %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
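ppm_alloc_pages() and ppm_free_pages(), bracketed by the traces above, manage physical memory in power-of-two blocks: a request of order n returns 1<<n contiguous pages, the order is recorded in the page descriptor (the free trace prints 1<<page->order), and the free lists are serialized by the ppm->free_lock spinlock. When the low bit of DEBUG_PPM_ALLOC_PAGES or DEBUG_PPM_FREE_PAGES is set, ppm_print() additionally dumps the allocator state on every call. A hedged usage sketch follows; the exact prototypes are assumptions, since only the function names appear in the diff.

/* Assumed prototypes, for illustration only:
 *   page_t * ppm_alloc_pages( uint32_t order );
 *   void     ppm_free_pages( page_t * page );   */
page_t * block = ppm_alloc_pages( 3 );          /* 1<<3 = 8 contiguous pages */
if( block != NULL )
{
    /* extended pointer -> physical page number, as in the exit trace */
    ppn_t ppn = ppm_page2ppn( XPTR( local_cxy , block ) );
    printk("allocated ppn %x\n", ppn );

    /* ... use the pages ... */

    ppm_free_pages( block );                    /* order retrieved from page->order */
}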
trunk/kernel/mm/vmm.c
--- vmm.c (r437)
+++ vmm.c (r438)

@@ -63 +63 @@
 intptr_t size;
 
-#if CONFIG_DEBUG_VMM_INIT
+#if DEBUG_VMM_INIT
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_INIT )
+if( DEBUG_VMM_INIT )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );

@@ -183 +183 @@
 hal_fence();
 
-#if CONFIG_DEBUG_VMM_INIT
+#if DEBUG_VMM_INIT
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_INIT )
+if( DEBUG_VMM_INIT )
 printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );

@@ -266 +266 @@
 lpid_t owner_lpid;
 
-#if CONFIG_DEBUG_VMM_UPDATE_PTE
+#if DEBUG_VMM_UPDATE_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+if( DEBUG_VMM_UPDATE_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );

@@ -292 +292 @@
 remote_process_cxy = GET_CXY( remote_process_xp );
 
-#if ( CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)
-if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+#if (DEBUG_VMM_UPDATE_PTE & 0x1)
+if( DEBUG_VMM_UPDATE_PTE < cycle )
 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );

@@ -305 +305 @@
 }
 
-#if CONFIG_DEBUG_VMM_UPDATE_PTE
+#if DEBUG_VMM_UPDATE_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )
+if( DEBUG_VMM_UPDATE_PTE < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle );

@@ -338 +338 @@
 lpid_t owner_lpid;
 
-#if CONFIG_DEBUG_VMM_SET_COW
+#if DEBUG_VMM_SET_COW
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );

@@ -370 +370 @@
 remote_process_cxy = GET_CXY( remote_process_xp );
 
-#if ( CONFIG_DEBUG_VMM_SET_COW &0x1)
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+#if (DEBUG_VMM_SET_COW &0x1)
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy );

@@ -394 +394 @@
 vpn_t vpn_size = vseg->vpn_size;
 
-#if ( CONFIG_DEBUG_VMM_SET_COW & 0x1)
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+#if (DEBUG_VMM_SET_COW & 0x1)
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n",
 __FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size );

@@ -445 +445 @@
 } // end loop on process copies
 
-#if CONFIG_DEBUG_VMM_SET_COW
+#if DEBUG_VMM_SET_COW
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_SET_COW < cycle )
+if( DEBUG_VMM_SET_COW < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );

@@ -482 +482 @@
 ppn_t ppn;
 
-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x enter / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD, cycle );

@@ -530 +530 @@
 type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
 
-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type),

@@ -556 +556 @@
 vseg_attach( child_vmm , child_vseg );
 
-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),

@@ -597 +597 @@
 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
 
-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , cycle );

@@ -649 +649 @@
 hal_fence();
 
-#if CONFIG_DEBUG_VMM_FORK_COPY
+#if DEBUG_VMM_FORK_COPY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )
+if( DEBUG_VMM_FORK_COPY < cycle )
 printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , cycle );

@@ -666 +666 @@
 vseg_t * vseg;
 
-#if CONFIG_DEBUG_VMM_DESTROY
+#if DEBUG_VMM_DESTROY
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
 #endif
 
-#if ( CONFIG_DEBUG_VMM_DESTROY & 1 )
+#if (DEBUG_VMM_DESTROY & 1 )
 vmm_display( process , true );
 #endif

@@ -694 +694 @@
 vseg = GET_PTR( vseg_xp );
 
-#if( CONFIG_DEBUG_VMM_DESTROY & 1 )
-if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+#if( DEBUG_VMM_DESTROY & 1 )
+if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : %s / vpn_base %x / vpn_size %d\n",
 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );

@@ -728 +728 @@
 hal_gpt_destroy( &vmm->gpt );
 
-#if CONFIG_DEBUG_VMM_DESTROY
+#if DEBUG_VMM_DESTROY
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_DESTROY < cycle )
+if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : thread %x exit / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , cycle );

@@ -882 +882 @@
 error_t error;
 
-#if CONFIG_DEBUG_VMM_CREATE_VSEG
+#if DEBUG_VMM_CREATE_VSEG
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
+if( DEBUG_VMM_CREATE_VSEG < cycle )
 printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle );

@@ -973 +973 @@
 remote_rwlock_wr_unlock( lock_xp );
 
-#if CONFIG_DEBUG_VMM_CREATE_VSEG
+#if DEBUG_VMM_CREATE_VSEG
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )
+if( DEBUG_VMM_CREATE_VSEG < cycle )
 printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle );

@@ -1110 +1110 @@
 uint32_t count; // actual number of pendinf forks
 
-#if CONFIG_DEBUG_VMM_UNMAP_VSEG
+#if DEBUG_VMM_UNMAP_VSEG
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+if( DEBUG_VMM_UNMAP_VSEG < cycle )
 printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );

@@ -1131 +1131 @@
 {
 
-#if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 )
-if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+#if( DEBUG_VMM_UNMAP_VSEG & 1 )
+if( DEBUG_VMM_UNMAP_VSEG < cycle )
 printk("- vpn %x / ppn %x\n" , vpn , ppn );
 #endif

@@ -1183 +1183 @@
 }
 
-#if CONFIG_DEBUG_VMM_UNMAP_VSEG
+#if DEBUG_VMM_UNMAP_VSEG
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )
+if( DEBUG_VMM_UNMAP_VSEG < cycle )
 printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle );

@@ -1383 +1383 @@
 {
 
-#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
-if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_ALLOCATE_PAGE
+if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] in %s : thread %x enter for vpn %x\n",
 __FUNCTION__ , CURRENT_THREAD, vpn );

@@ -1427 +1427 @@
 }
 
-#if CONFIG_DEBUG_VMM_ALLOCATE_PAGE
-if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_ALLOCATE_PAGE
+if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n",
 __FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) );

@@ -1452 +1452 @@
 index = vpn - vseg->vpn_base;
 
-#if CONFIG_DEBUG_VMM_GET_ONE_PPN
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_GET_ONE_PPN
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
 __FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index );

@@ -1515 +1515 @@
 uint32_t elf_offset = vseg->file_offset + offset;
 
-#if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
 __FUNCTION__, CURRENT_THREAD, vpn, elf_offset );

@@ -1530 +1530 @@
 {
 
-#if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
 __FUNCTION__, CURRENT_THREAD, vpn );

@@ -1548 +1548 @@
 {
 
-#if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
 __FUNCTION__, CURRENT_THREAD, vpn );

@@ -1580 +1580 @@
 {
 
-#if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
 "%d bytes from mapper / %d bytes from BSS\n",

@@ -1627 +1627 @@
 *ppn = ppm_page2ppn( page_xp );
 
-#if CONFIG_DEBUG_VMM_GET_ONE_PPN
-if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+#if DEBUG_VMM_GET_ONE_PPN
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , *ppn );

@@ -1655 +1655 @@
 "not called in the reference cluster\n" );
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
-printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow =%d / cycle %d\n",
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x enter / vpn %x / process %x / cow %d / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle );

@@ -1675 +1675 @@
 }
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if( DEBUG_VMM_GET_PTE & 1 )
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
 #endif
 
-// access GPT to get current PTE attributes and PPN
-hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
-
-// for both "copy_on_write" and "page_fault" events, allocate a physical page,
-// initialize it, register it in the reference GPT, update GPT copies in all
-// clusters containing a copy, and return the new_ppn and new_attr
-
-if( cow ) /////////////////////////// copy_on_write request //////////////////////
-{
+if( cow ) //////////////// copy_on_write request //////////////////////
+          // get PTE from reference GPT
+          // allocate a new physical page if there is pending forks,
+          // initialize it from old physical page content,
+          // update PTE in all GPT copies,
+{
+    // access GPT to get current PTE attributes and PPN
+    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
+
 assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
 "PTE must be mapped for a copy-on-write exception\n" );
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if( DEBUG_VMM_GET_PTE & 1 )
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
 __FUNCTION__, CURRENT_THREAD, vpn, process->pid );

@@ -1744 +1744 @@
 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
 }
-else ////////////////////////////////// page_fault request ////////////////////////
+else //////////// page_fault request ///////////////////////////
+     // get PTE from reference GPT
+     // allocate a physical page if it is a true page fault,
+     // register in reference GPT, but don't update GPT copies
 {
+    // access GPT to get current PTE
+    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
+
 if( (old_attr & GPT_MAPPED) == 0 ) // true page_fault => map it
 {
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if( DEBUG_VMM_GET_PTE & 1 )
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
 __FUNCTION__, CURRENT_THREAD, vpn, process->pid );

@@ -1792 +1798 @@
 }
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
-printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr =%x / cycle %d\n",
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
 __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
 #endif
 
-// return success
+// return PPN and flags
 *ppn = new_ppn;
 *attr = new_attr;

@@ -1814 +1820 @@
 error_t error;
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );

@@ -1854 +1860 @@
 }
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );

@@ -1873 +1879 @@
 error_t error;
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 uint32_t cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );

@@ -1913 +1919 @@
 }
 
-#if CONFIG_DEBUG_VMM_GET_PTE
+#if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-if( CONFIG_DEBUG_VMM_GET_PTE < cycle )
+if( DEBUG_VMM_GET_PTE < cycle )
 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
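Beyond the renaming, this is the one place where r438 changes code structure: in vmm_get_pte(), the hal_gpt_get_pte() lookup that was previously performed once, before the branch, is now performed inside each branch, and the new comments spell out the asymmetry between the two cases: a copy-on-write request may allocate a new page and must update the PTE in all GPT copies, while a true page fault registers the new page in the reference GPT only. A simplified control-flow sketch follows; allocation, locking and error handling are elided, and the elided steps are paraphrases, not verbatim kernel code.

/* Simplified sketch of the reorganized vmm_get_pte() logic in r438. */
if( cow )      /////////////// copy_on_write request ///////////////
{
    // get current PTE from the reference GPT (now done per branch)
    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );

    // the PTE must already be mapped for a COW exception
    // 1. allocate a new physical page if forks are pending
    // 2. initialize it from the old physical page content
    // 3. update the PTE in the reference GPT and in all GPT copies
}
else           /////////////// page_fault request //////////////////
{
    // get current PTE from the reference GPT
    hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );

    if( (old_attr & GPT_MAPPED) == 0 )   // true page fault => map it
    {
        // 1. allocate and initialize a physical page
        // 2. register it in the reference GPT only (copies not updated)
    }
}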