Changeset 438 for trunk/kernel/mm/vmm.c
- Timestamp:
- Apr 4, 2018, 2:49:02 PM (7 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/kernel/mm/vmm.c
r437 r438 63 63 intptr_t size; 64 64 65 #if CONFIG_DEBUG_VMM_INIT65 #if DEBUG_VMM_INIT 66 66 uint32_t cycle = (uint32_t)hal_get_cycles(); 67 if( CONFIG_DEBUG_VMM_INIT )67 if( DEBUG_VMM_INIT ) 68 68 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 69 69 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); … … 183 183 hal_fence(); 184 184 185 #if CONFIG_DEBUG_VMM_INIT185 #if DEBUG_VMM_INIT 186 186 cycle = (uint32_t)hal_get_cycles(); 187 if( CONFIG_DEBUG_VMM_INIT )187 if( DEBUG_VMM_INIT ) 188 188 printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n", 189 189 __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle ); … … 266 266 lpid_t owner_lpid; 267 267 268 #if CONFIG_DEBUG_VMM_UPDATE_PTE268 #if DEBUG_VMM_UPDATE_PTE 269 269 uint32_t cycle = (uint32_t)hal_get_cycles(); 270 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )270 if( DEBUG_VMM_UPDATE_PTE < cycle ) 271 271 printk("\n[DBG] %s : thread %x enter for process %x / vpn %x / cycle %d\n", 272 272 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle ); … … 292 292 remote_process_cxy = GET_CXY( remote_process_xp ); 293 293 294 #if ( CONFIG_DEBUG_VMM_UPDATE_PTE & 0x1)295 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )294 #if (DEBUG_VMM_UPDATE_PTE & 0x1) 295 if( DEBUG_VMM_UPDATE_PTE < cycle ) 296 296 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n", 297 297 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy ); … … 305 305 } 306 306 307 #if CONFIG_DEBUG_VMM_UPDATE_PTE307 #if DEBUG_VMM_UPDATE_PTE 308 308 cycle = (uint32_t)hal_get_cycles(); 309 if( CONFIG_DEBUG_VMM_UPDATE_PTE < cycle )309 if( DEBUG_VMM_UPDATE_PTE < cycle ) 310 310 printk("\n[DBG] %s : thread %x exit for process %x / vpn %x / cycle %d\n", 311 311 __FUNCTION__ , CURRENT_THREAD , process->pid , vpn , cycle ); … … 338 338 lpid_t owner_lpid; 339 339 340 #if CONFIG_DEBUG_VMM_SET_COW340 #if DEBUG_VMM_SET_COW 341 341 uint32_t cycle = (uint32_t)hal_get_cycles(); 
342 if( CONFIG_DEBUG_VMM_SET_COW < cycle )342 if( DEBUG_VMM_SET_COW < cycle ) 343 343 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 344 344 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); … … 370 370 remote_process_cxy = GET_CXY( remote_process_xp ); 371 371 372 #if ( CONFIG_DEBUG_VMM_SET_COW &0x1)373 if( CONFIG_DEBUG_VMM_SET_COW < cycle )372 #if (DEBUG_VMM_SET_COW &0x1) 373 if( DEBUG_VMM_SET_COW < cycle ) 374 374 printk("\n[DBG] %s : thread %x handling process %x in cluster %x\n", 375 375 __FUNCTION__ , CURRENT_THREAD , process->pid , remote_process_cxy ); … … 394 394 vpn_t vpn_size = vseg->vpn_size; 395 395 396 #if ( CONFIG_DEBUG_VMM_SET_COW & 0x1)397 if( CONFIG_DEBUG_VMM_SET_COW < cycle )396 #if (DEBUG_VMM_SET_COW & 0x1) 397 if( DEBUG_VMM_SET_COW < cycle ) 398 398 printk("\n[DBG] %s : thread %x handling vseg %s / vpn_base = %x / vpn_size = %x\n", 399 399 __FUNCTION__, CURRENT_THREAD , vseg_type_str(type), vpn_base, vpn_size ); … … 445 445 } // end loop on process copies 446 446 447 #if CONFIG_DEBUG_VMM_SET_COW447 #if DEBUG_VMM_SET_COW 448 448 cycle = (uint32_t)hal_get_cycles(); 449 if( CONFIG_DEBUG_VMM_SET_COW < cycle )449 if( DEBUG_VMM_SET_COW < cycle ) 450 450 printk("\n[DBG] %s : thread %x exit for process %x / cycle %d\n", 451 451 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); … … 480 480 ppn_t ppn; 481 481 482 #if CONFIG_DEBUG_VMM_FORK_COPY482 #if DEBUG_VMM_FORK_COPY 483 483 uint32_t cycle = (uint32_t)hal_get_cycles(); 484 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )484 if( DEBUG_VMM_FORK_COPY < cycle ) 485 485 printk("\n[DBG] %s : thread %x enter / cycle %d\n", 486 486 __FUNCTION__ , CURRENT_THREAD, cycle ); … … 530 530 type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) ); 531 531 532 #if CONFIG_DEBUG_VMM_FORK_COPY532 #if DEBUG_VMM_FORK_COPY 533 533 cycle = (uint32_t)hal_get_cycles(); 534 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )534 if( DEBUG_VMM_FORK_COPY < cycle ) 535 535 printk("\n[DBG] %s : thread %x found 
parent vseg %s / vpn_base = %x / cycle %d\n", 536 536 __FUNCTION__ , CURRENT_THREAD, vseg_type_str(type), … … 556 556 vseg_attach( child_vmm , child_vseg ); 557 557 558 #if CONFIG_DEBUG_VMM_FORK_COPY558 #if DEBUG_VMM_FORK_COPY 559 559 cycle = (uint32_t)hal_get_cycles(); 560 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )560 if( DEBUG_VMM_FORK_COPY < cycle ) 561 561 printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n", 562 562 __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type), … … 597 597 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 ); 598 598 599 #if CONFIG_DEBUG_VMM_FORK_COPY599 #if DEBUG_VMM_FORK_COPY 600 600 cycle = (uint32_t)hal_get_cycles(); 601 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )601 if( DEBUG_VMM_FORK_COPY < cycle ) 602 602 printk("\n[DBG] %s : thread %x copied vpn %x to child GPT / cycle %d\n", 603 603 __FUNCTION__ , CURRENT_THREAD , vpn , cycle ); … … 649 649 hal_fence(); 650 650 651 #if CONFIG_DEBUG_VMM_FORK_COPY651 #if DEBUG_VMM_FORK_COPY 652 652 cycle = (uint32_t)hal_get_cycles(); 653 if( CONFIG_DEBUG_VMM_FORK_COPY < cycle )653 if( DEBUG_VMM_FORK_COPY < cycle ) 654 654 printk("\n[DBG] %s : thread %x exit successfully / cycle %d\n", 655 655 __FUNCTION__ , CURRENT_THREAD , cycle ); … … 666 666 vseg_t * vseg; 667 667 668 #if CONFIG_DEBUG_VMM_DESTROY668 #if DEBUG_VMM_DESTROY 669 669 uint32_t cycle = (uint32_t)hal_get_cycles(); 670 if( CONFIG_DEBUG_VMM_DESTROY < cycle )670 if( DEBUG_VMM_DESTROY < cycle ) 671 671 printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n", 672 672 __FUNCTION__ , CURRENT_THREAD , process->pid , cycle ); 673 673 #endif 674 674 675 #if ( CONFIG_DEBUG_VMM_DESTROY & 1 )675 #if (DEBUG_VMM_DESTROY & 1 ) 676 676 vmm_display( process , true ); 677 677 #endif … … 694 694 vseg = GET_PTR( vseg_xp ); 695 695 696 #if( CONFIG_DEBUG_VMM_DESTROY & 1 )697 if( CONFIG_DEBUG_VMM_DESTROY < cycle )696 #if( DEBUG_VMM_DESTROY & 1 ) 697 if( DEBUG_VMM_DESTROY < cycle ) 698 698 printk("\n[DBG] 
%s : %s / vpn_base %x / vpn_size %d\n", 699 699 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); … … 728 728 hal_gpt_destroy( &vmm->gpt ); 729 729 730 #if CONFIG_DEBUG_VMM_DESTROY730 #if DEBUG_VMM_DESTROY 731 731 cycle = (uint32_t)hal_get_cycles(); 732 if( CONFIG_DEBUG_VMM_DESTROY < cycle )732 if( DEBUG_VMM_DESTROY < cycle ) 733 733 printk("\n[DBG] %s : thread %x exit / cycle %d\n", 734 734 __FUNCTION__ , CURRENT_THREAD , cycle ); … … 882 882 error_t error; 883 883 884 #if CONFIG_DEBUG_VMM_CREATE_VSEG884 #if DEBUG_VMM_CREATE_VSEG 885 885 uint32_t cycle = (uint32_t)hal_get_cycles(); 886 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )886 if( DEBUG_VMM_CREATE_VSEG < cycle ) 887 887 printk("\n[DBG] %s : thread %x enter / process %x / base %x / size %x / %s / cxy %x / cycle %d\n", 888 888 __FUNCTION__, CURRENT_THREAD, process->pid, base, size, vseg_type_str(type), cxy, cycle ); … … 973 973 remote_rwlock_wr_unlock( lock_xp ); 974 974 975 #if CONFIG_DEBUG_VMM_CREATE_VSEG975 #if DEBUG_VMM_CREATE_VSEG 976 976 cycle = (uint32_t)hal_get_cycles(); 977 if( CONFIG_DEBUG_VMM_CREATE_VSEG < cycle )977 if( DEBUG_VMM_CREATE_VSEG < cycle ) 978 978 printk("\n[DBG] %s : thread %x exit / process %x / %s / cxy %x / cycle %d\n", 979 979 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str(type), cxy, cycle ); … … 1110 1110 uint32_t count; // actual number of pendinf forks 1111 1111 1112 #if CONFIG_DEBUG_VMM_UNMAP_VSEG1112 #if DEBUG_VMM_UNMAP_VSEG 1113 1113 uint32_t cycle = (uint32_t)hal_get_cycles(); 1114 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )1114 if( DEBUG_VMM_UNMAP_VSEG < cycle ) 1115 1115 printk("\n[DBG] %s : thread %x enter / process %x / vseg %s / base %x / cycle %d\n", 1116 1116 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); … … 1131 1131 { 1132 1132 1133 #if( CONFIG_DEBUG_VMM_UNMAP_VSEG & 1 )1134 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )1133 #if( DEBUG_VMM_UNMAP_VSEG & 1 ) 1134 if( 
DEBUG_VMM_UNMAP_VSEG < cycle ) 1135 1135 printk("- vpn %x / ppn %x\n" , vpn , ppn ); 1136 1136 #endif … … 1183 1183 } 1184 1184 1185 #if CONFIG_DEBUG_VMM_UNMAP_VSEG1185 #if DEBUG_VMM_UNMAP_VSEG 1186 1186 cycle = (uint32_t)hal_get_cycles(); 1187 if( CONFIG_DEBUG_VMM_UNMAP_VSEG < cycle )1187 if( DEBUG_VMM_UNMAP_VSEG < cycle ) 1188 1188 printk("\n[DBG] %s : thread %x exit / process %x / vseg %s / base %x / cycle %d\n", 1189 1189 __FUNCTION__, CURRENT_THREAD, process->pid, vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); … … 1383 1383 { 1384 1384 1385 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE1386 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )1385 #if DEBUG_VMM_ALLOCATE_PAGE 1386 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1387 1387 printk("\n[DBG] in %s : thread %x enter for vpn %x\n", 1388 1388 __FUNCTION__ , CURRENT_THREAD, vpn ); … … 1427 1427 } 1428 1428 1429 #if CONFIG_DEBUG_VMM_ALLOCATE_PAGE1430 if( CONFIG_DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )1429 #if DEBUG_VMM_ALLOCATE_PAGE 1430 if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() ) 1431 1431 printk("\n[DBG] in %s : thread %x exit for vpn = %d / ppn = %x\n", 1432 1432 __FUNCTION__ , CURRENT_THREAD, vpn, ppm_page2ppn( XPTR( page_cxy , page_ptr ) ) ); … … 1452 1452 index = vpn - vseg->vpn_base; 1453 1453 1454 #if CONFIG_DEBUG_VMM_GET_ONE_PPN1455 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1454 #if DEBUG_VMM_GET_ONE_PPN 1455 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1456 1456 printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n", 1457 1457 __FUNCTION__, CURRENT_THREAD, vpn, vseg_type_str(type), index ); … … 1515 1515 uint32_t elf_offset = vseg->file_offset + offset; 1516 1516 1517 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1518 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1517 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1518 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1519 1519 
printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n", 1520 1520 __FUNCTION__, CURRENT_THREAD, vpn, elf_offset ); … … 1530 1530 { 1531 1531 1532 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1533 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1532 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1533 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1534 1534 printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n", 1535 1535 __FUNCTION__, CURRENT_THREAD, vpn ); … … 1548 1548 { 1549 1549 1550 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1551 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1550 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1551 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1552 1552 printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n", 1553 1553 __FUNCTION__, CURRENT_THREAD, vpn ); … … 1580 1580 { 1581 1581 1582 #if ( CONFIG_DEBUG_VMM_GET_ONE_PPN & 0x1)1583 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1582 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1583 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1584 1584 printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n" 1585 1585 " %d bytes from mapper / %d bytes from BSS\n", … … 1627 1627 *ppn = ppm_page2ppn( page_xp ); 1628 1628 1629 #if CONFIG_DEBUG_VMM_GET_ONE_PPN1630 if( CONFIG_DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )1629 #if DEBUG_VMM_GET_ONE_PPN 1630 if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() ) 1631 1631 printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n", 1632 1632 __FUNCTION__ , CURRENT_THREAD , vpn , *ppn ); … … 1655 1655 "not called in the reference cluster\n" ); 1656 1656 1657 #if CONFIG_DEBUG_VMM_GET_PTE1657 #if DEBUG_VMM_GET_PTE 1658 1658 uint32_t cycle = (uint32_t)hal_get_cycles(); 1659 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1660 printk("\n[DBG] %s : thread %x enter for vpn = %x / process %x / cow =%d / cycle %d\n",1659 if( DEBUG_VMM_GET_PTE < cycle ) 1660 printk("\n[DBG] %s : thread %x 
enter / vpn %x / process %x / cow %d / cycle %d\n", 1661 1661 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cow , cycle ); 1662 1662 #endif … … 1675 1675 } 1676 1676 1677 #if CONFIG_DEBUG_VMM_GET_PTE1677 #if( DEBUG_VMM_GET_PTE & 1 ) 1678 1678 cycle = (uint32_t)hal_get_cycles(); 1679 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1679 if( DEBUG_VMM_GET_PTE < cycle ) 1680 1680 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n", 1681 1681 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size ); 1682 1682 #endif 1683 1683 1684 // access GPT to get current PTE attributes and PPN1685 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );1686 1687 // for both "copy_on_write" and "page_fault" events, allocate a physical page,1688 // initialize it, register it in the reference GPT, update GPT copies in all1689 // clusters containing a copy, and return the new_ppn and new_attr1690 1691 if( cow ) /////////////////////////// copy_on_write request //////////////////////1692 { 1684 if( cow ) //////////////// copy_on_write request ////////////////////// 1685 // get PTE from reference GPT 1686 // allocate a new physical page if there is pending forks, 1687 // initialize it from old physical page content, 1688 // update PTE in all GPT copies, 1689 { 1690 // access GPT to get current PTE attributes and PPN 1691 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1692 1693 1693 assert( (old_attr & GPT_MAPPED) , __FUNCTION__ , 1694 1694 "PTE must be mapped for a copy-on-write exception\n" ); 1695 1695 1696 #if CONFIG_DEBUG_VMM_GET_PTE1696 #if( DEBUG_VMM_GET_PTE & 1 ) 1697 1697 cycle = (uint32_t)hal_get_cycles(); 1698 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1698 if( DEBUG_VMM_GET_PTE < cycle ) 1699 1699 printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n", 1700 1700 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); … … 1744 1744 hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 ); 
1745 1745 } 1746 else ////////////////////////////////// page_fault request //////////////////////// 1746 else //////////// page_fault request /////////////////////////// 1747 // get PTE from reference GPT 1748 // allocate a physical page if it is a true page fault, 1749 // register in reference GPT, but don't update GPT copies 1747 1750 { 1751 // access GPT to get current PTE 1752 hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn ); 1753 1748 1754 if( (old_attr & GPT_MAPPED) == 0 ) // true page_fault => map it 1749 1755 { 1750 1756 1751 #if CONFIG_DEBUG_VMM_GET_PTE1757 #if( DEBUG_VMM_GET_PTE & 1 ) 1752 1758 cycle = (uint32_t)hal_get_cycles(); 1753 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1759 if( DEBUG_VMM_GET_PTE < cycle ) 1754 1760 printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n", 1755 1761 __FUNCTION__, CURRENT_THREAD, vpn, process->pid ); … … 1792 1798 } 1793 1799 1794 #if CONFIG_DEBUG_VMM_GET_PTE1800 #if DEBUG_VMM_GET_PTE 1795 1801 cycle = (uint32_t)hal_get_cycles(); 1796 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1797 printk("\n[DBG] %s : thread,%x exit for vpn %x in process %x / ppn = %x / attr =%x / cycle %d\n",1802 if( DEBUG_VMM_GET_PTE < cycle ) 1803 printk("\n[DBG] %s : thread,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n", 1798 1804 __FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle ); 1799 1805 #endif 1800 1806 1801 // return success1807 // return PPN and flags 1802 1808 *ppn = new_ppn; 1803 1809 *attr = new_attr; … … 1814 1820 error_t error; 1815 1821 1816 #if CONFIG_DEBUG_VMM_GET_PTE1822 #if DEBUG_VMM_GET_PTE 1817 1823 uint32_t cycle = (uint32_t)hal_get_cycles(); 1818 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1824 if( DEBUG_VMM_GET_PTE < cycle ) 1819 1825 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n", 1820 1826 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); … … 1854 1860 } 1855 1861 1856 #if CONFIG_DEBUG_VMM_GET_PTE1862 #if DEBUG_VMM_GET_PTE 
1857 1863 cycle = (uint32_t)hal_get_cycles(); 1858 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1864 if( DEBUG_VMM_GET_PTE < cycle ) 1859 1865 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n", 1860 1866 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); … … 1873 1879 error_t error; 1874 1880 1875 #if CONFIG_DEBUG_VMM_GET_PTE1881 #if DEBUG_VMM_GET_PTE 1876 1882 uint32_t cycle = (uint32_t)hal_get_cycles(); 1877 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1883 if( DEBUG_VMM_GET_PTE < cycle ) 1878 1884 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n", 1879 1885 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle ); … … 1913 1919 } 1914 1920 1915 #if CONFIG_DEBUG_VMM_GET_PTE1921 #if DEBUG_VMM_GET_PTE 1916 1922 cycle = (uint32_t)hal_get_cycles(); 1917 if( CONFIG_DEBUG_VMM_GET_PTE < cycle )1923 if( DEBUG_VMM_GET_PTE < cycle ) 1918 1924 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n", 1919 1925 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
Note: See TracChangeset for help on using the changeset viewer.