Changeset 585 for trunk/kernel/mm
Timestamp: Nov 1, 2018, 12:22:17 PM
Location:  trunk/kernel/mm
Files:     3 edited
Legend: '+' marks added lines, '-' marks removed lines; unprefixed lines are unchanged context.
trunk/kernel/mm/ppm.c
r567 → r585:

  #include <bits.h>
  #include <page.h>
+ #include <dqdt.h>
  #include <busylock.h>
  #include <queuelock.h>
  …
  busylock_release( &ppm->free_lock );

+ // update DQDT
+ dqdt_increment_pages( order );
+
  #if DEBUG_PPM_ALLOC_PAGES
  cycle = (uint32_t)hal_get_cycles();
  …
  busylock_release( &ppm->free_lock );

+ // update DQDT
+ dqdt_decrement_pages( page->order );
+
  #if DEBUG_PPM_FREE_PAGES
  cycle = (uint32_t)hal_get_cycles();
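The point of this hunk is accounting: every block of 2^order pages taken from or returned to the buddy allocator is now mirrored in the cluster's DQDT node, so placement decisions can see per-cluster page usage. The toy program below is a minimal sketch of that invariant, not ALMOS-MKH code; it assumes dqdt_increment_pages(order) and dqdt_decrement_pages(order) account 2^order pages, matching the allocator's block size.

    /* Toy model -- NOT kernel code: illustrates the accounting invariant this
     * hunk introduces.  We assume dqdt_increment_pages(order) accounts a block
     * of 2^order pages, matching the buddy allocator's block size. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t dqdt_pages;   /* stand-in for the per-cluster DQDT counter */

    static void dqdt_increment_pages( uint32_t order ) { dqdt_pages += 1ULL << order; }
    static void dqdt_decrement_pages( uint32_t order ) { dqdt_pages -= 1ULL << order; }

    int main( void )
    {
        dqdt_increment_pages( 3 );   /* ppm_alloc_pages( 3 )     => +8 pages */
        dqdt_increment_pages( 0 );   /* ppm_alloc_pages( 0 )     => +1 page  */
        dqdt_decrement_pages( 3 );   /* ppm_free_pages( 8-page ) => -8 pages */

        printf( "pages accounted in DQDT: %llu\n", (unsigned long long)dqdt_pages );
        assert( dqdt_pages == 1 );
        return 0;
    }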
trunk/kernel/mm/vmm.c
r580 → r585:

  #include <kmem.h>
  #include <vmm.h>
+ #include <hal_exception.h>

  //////////////////////////////////////////////////////////////////////////////////
  …
  printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );

- // initialize GPT (architecture specific)
+ // initialize GPT lock
+ remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
+
+ // architecture specific GPT initialisation
  // (For TSAR, identity map the kentry_vseg)
  error = hal_vmm_init( vmm );
  …
  process->vmm.gpt.ptr , process->pid , local_cxy );

- // get lock protecting the vseg list
+ // get lock protecting the VSL and the GPT
  remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
+ remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );

  // scan the list of vsegs
  …
  for( vpn = base ; vpn < (base+size) ; vpn++ )
  {
-     hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
+     hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
      if( attr & GPT_MAPPED )
      {
  …
  }

- // release the lock
+ // release the locks
  remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
+ remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );

  } // vmm_display()
  …
                         ppn_t       ppn )
  {
-
  xlist_entry_t * process_root_ptr;
  xptr_t          process_root_xp;
  …

  // check cluster is reference
- assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
- "not called in reference cluster\n");
+ assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n");

  // get extended pointer on root of process copies xlist in owner cluster
  …
  if( remote_process_cxy == local_cxy )
  {
-     // the reference GPT is the local GPT
-     gpt_t * gpt = GET_PTR( remote_gpt_xp );
-
      // scan all pages in vseg
      for( vpn = vpn_base ; vpn < (vpn_base + vpn_size) ; vpn++ )
      {
          // get page attributes and PPN from reference GPT
-         hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
+         hal_gpt_get_pte( remote_gpt_xp , vpn , &attr , &ppn );

          // atomically update pending forks counter if page is mapped
  …
  {
      // get GPT entry
-     hal_gpt_get_pte( gpt , vpn , &attr , &ppn );
+     hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );

      if( attr & GPT_MAPPED ) // entry is mapped
  …

      // check small page
-     assert( (attr & GPT_SMALL) ,
-     "a user vseg must use small pages" );
+     assert( (attr & GPT_SMALL) , "a user vseg must use small pages" );

-     // unmap GPT entry in all GPT copies
+     // unmap GPT entry in local GPT
      hal_gpt_reset_pte( gpt , vpn );
  …
  // for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required)
  // and returns an extended pointer on the allocated page descriptor.
+ // It can be called by a thread running in any cluster.
  // The vseg cannot have the FILE type.
  //////////////////////////////////////////////////////////////////////////////////////
  …
  } // end vmm_get_one_ppn()

- /////////////////////////////////////////
- error_t vmm_get_pte( process_t * process,
-                      vpn_t       vpn,
-                      bool_t      cow,
-                      uint32_t  * attr,
-                      ppn_t     * ppn )
+ ///////////////////////////////////////////////////
+ error_t vmm_handle_page_fault( process_t * process,
+                                vpn_t       vpn )
  {
- ppn_t      old_ppn;     // current PTE_PPN
- uint32_t   old_attr;    // current PTE_ATTR
- ppn_t      new_ppn;     // new PTE_PPN
- uint32_t   new_attr;    // new PTE_ATTR
- vmm_t    * vmm;
- vseg_t   * vseg;
- error_t    error;
-
- #if DEBUG_VMM_GET_PTE
+ vseg_t    * vseg;           // vseg containing vpn
+ uint32_t    new_attr;       // new PTE_ATTR value
+ ppn_t       new_ppn;        // new PTE_PPN value
+ uint32_t    ref_attr;       // PTE_ATTR value in reference GPT
+ ppn_t       ref_ppn;        // PTE_PPN value in reference GPT
+ cxy_t       ref_cxy;        // reference cluster for missing vpn
+ process_t * ref_ptr;        // reference process for missing vpn
+ xptr_t      local_gpt_xp;   // extended pointer on local GPT
+ xptr_t      local_lock_xp;  // extended pointer on local GPT lock
+ xptr_t      ref_gpt_xp;     // extended pointer on reference GPT
+ xptr_t      ref_lock_xp;    // extended pointer on reference GPT lock
+ error_t     error;          // value returned by called functions
+
+ // get local vseg (access to reference VSL can be required)
+ error = vmm_get_vseg( process,
+                       (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
+                       &vseg );
+
+ if( error )
+ {
+     printk("\n[ERROR] in %s : illegal VPN %x in process %x\n",
+     __FUNCTION__ , vpn , process->pid );
+
+     return EXCP_USER_ERROR;
+ }
+
+ #if DEBUG_VMM_HANDLE_PAGE_FAULT
+ uint32_t cycle = (uint32_t)hal_get_cycles();
  thread_t * this = CURRENT_THREAD;
- uint32_t cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_GET_PTE < cycle )
- printk("\n[DBG] %s : thread %x in process %x enter / vpn %x / cow %d / cycle %d\n",
- __FUNCTION__, this->trdid, process->pid, vpn, cow, cycle );
- #endif
-
- // get VMM pointer
- vmm = &process->vmm;
-
- // get local vseg descriptor
- error = vmm_get_vseg( process,
-                       ((intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT),
-                       &vseg );
-
- // vseg has been checked by the vmm_handle_page_fault() function
- assert( (vseg != NULL) , "vseg undefined / vpn %x\n");
-
- if( cow ) //////////////// copy_on_write request //////////////////////
-           // get PTE from local GPT
-           // allocate a new physical page if there are pending forks,
-           // initialize it from old physical page content,
-           // update PTE in all GPT copies,
- {
-     // access local GPT to get current PTE attributes and PPN
-     hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
-
-     assert( (old_attr & GPT_MAPPED),
-     "PTE unmapped for a COW exception / vpn %x\n" );
-
- #if( DEBUG_VMM_GET_PTE & 1 )
- if( DEBUG_VMM_GET_PTE < cycle )
- printk("\n[DBG] %s : thread %x in process %x handling COW for vpn %x\n",
- __FUNCTION__, this->trdid, process->pid, vpn );
- #endif
-
-     // get extended pointer, cluster and local pointer on physical page descriptor
-     xptr_t   page_xp  = ppm_ppn2page( old_ppn );
-     cxy_t    page_cxy = GET_CXY( page_xp );
-     page_t * page_ptr = GET_PTR( page_xp );
-
-     // get extended pointers on forks and lock field in page descriptor
-     xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
-     xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
-
-     // take lock protecting "forks" counter
-     remote_busylock_acquire( lock_xp );
-
-     // get number of pending forks in page descriptor
-     uint32_t forks = hal_remote_l32( forks_xp );
-
-     if( forks )        // pending fork => allocate a new page, copy old to new
-     {
-         // allocate a new physical page
-         page_xp = vmm_page_allocate( vseg , vpn );
-         if( page_xp == XPTR_NULL )
-         {
-             printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
-             __FUNCTION__ , process->pid , vpn );
-             return -1;
-         }
-
-         // compute allocated page PPN
-         new_ppn = ppm_page2ppn( page_xp );
-
-         // copy old page content to new page
-         xptr_t old_base_xp = ppm_ppn2base( old_ppn );
-         xptr_t new_base_xp = ppm_ppn2base( new_ppn );
-         memcpy( GET_PTR( new_base_xp ),
-                 GET_PTR( old_base_xp ),
-                 CONFIG_PPM_PAGE_SIZE );
-
-         // decrement pending forks counter in page descriptor
-         hal_remote_atomic_add( forks_xp , -1 );
-     }
-     else               // no pending fork => keep the existing page
-     {
-         new_ppn = old_ppn;
-     }
-
-     // release lock protecting "forks" counter
-     remote_busylock_release( lock_xp );
-
-     // build new_attr : reset COW and set WRITABLE
-     new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
-
-     // update GPT[vpn] for all GPT copies
-     vmm_global_update_pte( process, vpn, new_attr, new_ppn );
- }
- else //////////// page_fault request ///////////////////////////
-      // get PTE from local GPT
-      // allocate a physical page if it is a true page fault,
-      // initialize it if type is FILE, CODE, or DATA,
-      // register in reference GPT, but don't update GPT copies
- {
-     // access local GPT to get current PTE
-     hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
-
-     if( (old_attr & GPT_MAPPED) == 0 )   // true page_fault => map it
-     {
-
- #if( DEBUG_VMM_GET_PTE & 1 )
- if( DEBUG_VMM_GET_PTE < cycle )
- printk("\n[DBG] %s : thread %x in process %x handling page fault for vpn %x\n",
- __FUNCTION__, this->trdid, process->pid, vpn );
- #endif
-         // allocate new_ppn, and initialize the new page
+ if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
+ printk("\n[DBG] %s : thread[%x,%x] enter for vpn %x / %s / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(vseg->type), cycle );
+ #endif
+
+ //////////////// private vseg => access only the local GPT
+ if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
+ {
+     // build extended pointer on local GPT and local GPT lock
+     local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
+     local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
+
+     // take local GPT lock in write mode
+     remote_rwlock_wr_acquire( local_lock_xp );
+
+     // check VPN still unmapped in local GPT
+     // do nothing if VPN has been mapped by a concurrent page_fault
+     hal_gpt_get_pte( local_gpt_xp,
+                      vpn,
+                      &new_attr,
+                      &new_ppn );
+
+     if( (new_attr & GPT_MAPPED) == 0 )   // VPN still unmapped
+     {
+         // allocate and initialise a physical page depending on the vseg type
          error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
+
          if( error )
          {
              printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
              __FUNCTION__ , process->pid , vpn );
-             return -1;
+
+             // release local GPT lock in write mode
+             remote_rwlock_wr_release( local_lock_xp );
+
+             return EXCP_KERNEL_PANIC;
          }
  …
          if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;

-         // register new PTE in reference GPT
-         // on demand policy => no update of GPT copies
-         error = hal_gpt_set_pte( &vmm->gpt,
+         // set PTE (PPN & attribute) to local GPT
+         error = hal_gpt_set_pte( local_gpt_xp,
                                   vpn,
                                   new_attr,
                                   new_ppn );
          if( error )
          {
-             printk("\n[ERROR] in %s : cannot update GPT / process = %x / vpn = %x\n",
+             printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n",
              __FUNCTION__ , process->pid , vpn );
-             return -1;
+
+             // release local GPT lock in write mode
+             remote_rwlock_wr_release( local_lock_xp );
+
+             return EXCP_KERNEL_PANIC;
          }
      }
-     else // mapped in reference GPT => get it
-     {
-         new_ppn  = old_ppn;
-         new_attr = old_attr;
-     }
- }
-
- #if DEBUG_VMM_GET_PTE
- cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_GET_PTE < cycle )
- printk("\n[DBG] %s : thread %x in process %x exit / vpn %x / ppn %x / attr %x / cycle %d\n",
- __FUNCTION__, this->trdid, process->pid, vpn, new_ppn, new_attr, cycle );
- #endif
-
- // return PPN and flags
- *ppn  = new_ppn;
- *attr = new_attr;
- return 0;
-
- } // end vmm_get_pte()
-
- ///////////////////////////////////////////////////
- error_t vmm_handle_page_fault( process_t * process,
-                                vpn_t       vpn,
-                                bool_t      is_cow )
- {
- uint32_t    attr;     // missing page attributes
- ppn_t       ppn;      // missing page PPN
- vseg_t    * vseg;     // vseg containing vpn
- uint32_t    type;     // vseg type
- cxy_t       ref_cxy;  // reference cluster for missing vpn
- process_t * ref_ptr;  // reference process for missing vpn
- error_t     error;
-
- thread_t  * this = CURRENT_THREAD;
-
- #if DEBUG_VMM_HANDLE_PAGE_FAULT
- uint32_t cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
- printk("\n[DBG] %s : thread %x in process %x enter for vpn %x / core[%x,%d] / cycle %d\n",
- __FUNCTION__, this, process->pid, vpn, local_cxy, this->core->lid, cycle );
- #endif
-
- // get local vseg (access reference VSL if required)
- error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
-
- if( error )
- {
-     printk("\n[ERROR] in %s : vpn %x / process %x / thread %x / core[%x,%d] / cycle %d\n",
-     __FUNCTION__, vpn, process->pid, this->trdid, local_cxy, this->core->lid,
-     (uint32_t)hal_get_cycles() );
-     return error;
- }
-
- // get segment type
- type = vseg->type;
-
- // get relevant process cluster and local pointer
- // for private vsegs (CODE and DATA type),
- // the relevant process descriptor is local.
- if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )
- {
-     ref_cxy = local_cxy;
-     ref_ptr = process;
- }
- else
- {
-     ref_cxy = GET_CXY( process->ref_xp );
-     ref_ptr = GET_PTR( process->ref_xp );
- }
-
- // get missing PTE attributes and PPN
- if( local_cxy != ref_cxy )
- {
-
- #if DEBUG_VMM_HANDLE_PAGE_FAULT
- if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
- printk("\n[DBG] %s : thread %x in process %x call RPC_VMM_GET_PTE\n",
- __FUNCTION__, this, process->pid );
- #endif
-
-     rpc_vmm_get_pte_client( ref_cxy,
-                             ref_ptr,
-                             vpn,
-                             is_cow,
-                             &attr,
-                             &ppn,
-                             &error );
-
-     // get local VMM pointer
-     vmm_t * vmm = &process->vmm;
-
-     // update local GPT
-     error |= hal_gpt_set_pte( &vmm->gpt,
-                               vpn,
-                               attr,
-                               ppn );
- }
- else   // local cluster is the reference cluster
- {
-     error = vmm_get_pte( process,
-                          vpn,
-                          is_cow,
-                          &attr,
-                          &ppn );
- }
+
+     // release local GPT lock in write mode
+     remote_rwlock_wr_release( local_lock_xp );

  #if DEBUG_VMM_HANDLE_PAGE_FAULT
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
- printk("\n[DBG] %s : thread %x in process %x exit for vpn %x / core[%x,%d] / cycle %d\n",
- __FUNCTION__, this, process->pid, vpn, local_cxy, this->core->lid, cycle );
- #endif
-
- return error;
-
+ printk("\n[DBG] %s : private page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
+ __FUNCTION__, vpn, new_ppn, new_attr, cycle );
+ #endif
+     return EXCP_NON_FATAL;
+
+ } // end local GPT access
+
+ //////////// public vseg => access reference GPT
+ else
+ {
+     // get reference process cluster and local pointer
+     ref_cxy = GET_CXY( process->ref_xp );
+     ref_ptr = GET_PTR( process->ref_xp );
+
+     // build extended pointer on reference GPT and reference GPT lock
+     ref_gpt_xp  = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
+     ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
+
+     // build extended pointer on local GPT and local GPT lock
+     local_gpt_xp  = XPTR( local_cxy , &process->vmm.gpt );
+     local_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
+
+     // take reference GPT lock in read mode
+     remote_rwlock_rd_acquire( ref_lock_xp );
+
+     // get PPN & attributes directly from reference GPT
+     // this avoids a costly RPC in case of false page fault
+     hal_gpt_get_pte( ref_gpt_xp,
+                      vpn,
+                      &ref_attr,
+                      &ref_ppn );
+
+     // release reference GPT lock in read mode
+     remote_rwlock_rd_release( ref_lock_xp );
+
+     if( ref_attr & GPT_MAPPED )        // false page fault => update local GPT
+     {
+         // take local GPT lock in write mode
+         remote_rwlock_wr_acquire( local_lock_xp );
+
+         // check VPN still unmapped in local GPT
+         hal_gpt_get_pte( local_gpt_xp,
+                          vpn,
+                          &new_attr,
+                          &new_ppn );
+
+         if( (new_attr & GPT_MAPPED) == 0 )   // VPN still unmapped
+         {
+             // update local GPT from reference GPT
+             error = hal_gpt_set_pte( local_gpt_xp,
+                                      vpn,
+                                      ref_attr,
+                                      ref_ppn );
+             if( error )
+             {
+                 printk("\n[ERROR] in %s : cannot update local GPT / process %x / vpn = %x\n",
+                 __FUNCTION__ , process->pid , vpn );
+
+                 // release local GPT lock in write mode
+                 remote_rwlock_wr_release( local_lock_xp );
+
+                 return EXCP_KERNEL_PANIC;
+             }
+         }
+         else                                 // VPN has been mapped by a concurrent page_fault
+         {
+             // keep PTE from local GPT
+             ref_attr = new_attr;
+             ref_ppn  = new_ppn;
+         }
+
+         // release local GPT lock in write mode
+         remote_rwlock_wr_release( local_lock_xp );
+
+ #if DEBUG_VMM_HANDLE_PAGE_FAULT
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
+ printk("\n[DBG] %s : false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
+ __FUNCTION__, vpn, ref_ppn, ref_attr, cycle );
+ #endif
+         return EXCP_NON_FATAL;
+     }
+     else                               // true page fault => update reference GPT
+     {
+         // take reference GPT lock in write mode
+         remote_rwlock_wr_acquire( ref_lock_xp );
+
+         // check VPN still unmapped in reference GPT
+         // do nothing if VPN has been mapped by a concurrent page_fault
+         hal_gpt_get_pte( ref_gpt_xp,
+                          vpn,
+                          &ref_attr,
+                          &ref_ppn );
+
+         if( (ref_attr & GPT_MAPPED) == 0 )   // VPN actually unmapped
+         {
+             // allocate and initialise a physical page depending on the vseg type
+             error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
+
+             if( error )
+             {
+                 printk("\n[ERROR] in %s : no memory / process = %x / vpn = %x\n",
+                 __FUNCTION__ , process->pid , vpn );
+
+                 // release reference GPT lock in write mode
+                 remote_rwlock_wr_release( ref_lock_xp );
+
+                 return EXCP_KERNEL_PANIC;
+             }
+
+             // define new_attr from vseg flags
+             new_attr = GPT_MAPPED | GPT_SMALL;
+             if( vseg->flags & VSEG_USER  ) new_attr |= GPT_USER;
+             if( vseg->flags & VSEG_WRITE ) new_attr |= GPT_WRITABLE;
+             if( vseg->flags & VSEG_EXEC  ) new_attr |= GPT_EXECUTABLE;
+             if( vseg->flags & VSEG_CACHE ) new_attr |= GPT_CACHABLE;
+
+             // update reference GPT
+             error = hal_gpt_set_pte( ref_gpt_xp,
+                                      vpn,
+                                      new_attr,
+                                      new_ppn );
+
+             // update local GPT (protected by reference GPT lock)
+             error |= hal_gpt_set_pte( local_gpt_xp,
+                                       vpn,
+                                       new_attr,
+                                       new_ppn );
+
+             if( error )
+             {
+                 printk("\n[ERROR] in %s : cannot update GPT / process %x / vpn = %x\n",
+                 __FUNCTION__ , process->pid , vpn );
+
+                 // release reference GPT lock in write mode
+                 remote_rwlock_wr_release( ref_lock_xp );
+
+                 return EXCP_KERNEL_PANIC;
+             }
+         }
+
+         // release reference GPT lock in write mode
+         remote_rwlock_wr_release( ref_lock_xp );
+
+ #if DEBUG_VMM_HANDLE_PAGE_FAULT
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
+ printk("\n[DBG] %s : true page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
+ __FUNCTION__, vpn, new_ppn, new_attr, cycle );
+ #endif
+         return EXCP_NON_FATAL;
+     }
+ }
  } // end vmm_handle_page_fault()

- /* deprecated April 2018 [AG]
-
- error_t vmm_v2p_translate( process_t * process,
-                            void      * ptr,
-                            paddr_t   * paddr )
+ ////////////////////////////////////////////
+ error_t vmm_handle_cow( process_t * process,
+                         vpn_t       vpn )
  {
- // access page table
- error_t  error;
- vpn_t    vpn;
- uint32_t attr;
- ppn_t    ppn;
- uint32_t offset;
-
- vpn    = (vpn_t)( (intptr_t)ptr >> CONFIG_PPM_PAGE_SHIFT );
- offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );
-
- if( local_cxy == GET_CXY( process->ref_xp) ) // local process is reference process
- {
-     error = vmm_get_pte( process, vpn , false , &attr , &ppn );
- }
- else // calling process is not reference process
- {
-     cxy_t       ref_cxy = GET_CXY( process->ref_xp );
-     process_t * ref_ptr = GET_PTR( process->ref_xp );
-     rpc_vmm_get_pte_client( ref_cxy , ref_ptr , vpn , false , &attr , &ppn , &error );
- }
-
- // set paddr
- *paddr = (((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT) | offset;
-
- return error;
-
- } // end vmm_v2p_translate()
-
- */
+ vseg_t    * vseg;         // vseg containing vpn
+ cxy_t       ref_cxy;      // reference cluster for missing vpn
+ process_t * ref_ptr;      // reference process for missing vpn
+ xptr_t      gpt_xp;       // extended pointer on GPT
+ xptr_t      gpt_lock_xp;  // extended pointer on GPT lock
+ uint32_t    old_attr;     // current PTE_ATTR value
+ ppn_t       old_ppn;      // current PTE_PPN value
+ uint32_t    new_attr;     // new PTE_ATTR value
+ ppn_t       new_ppn;      // new PTE_PPN value
+ error_t     error;
+
+ #if DEBUG_VMM_HANDLE_COW
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ thread_t * this = CURRENT_THREAD;
+ if( DEBUG_VMM_HANDLE_COW < cycle )
+ printk("\n[DBG] %s : thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
+ __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
+ #endif
+
+ // get local vseg
+ error = vmm_get_vseg( process,
+                       (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
+                       &vseg );
+
+ if( error )
+ {
+     printk("\n[PANIC] in %s : VPN %x in process %x not in registered vseg\n",
+     __FUNCTION__, vpn, process->pid );
+
+     return EXCP_KERNEL_PANIC;
+ }
+
+ // get reference GPT cluster and local pointer
+ ref_cxy = GET_CXY( process->ref_xp );
+ ref_ptr = GET_PTR( process->ref_xp );
+
+ // build relevant extended pointers on GPT and GPT lock
+ // - access local GPT for a private vseg
+ // - access reference GPT for a public vseg
+ if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
+ {
+     gpt_xp      = XPTR( local_cxy , &process->vmm.gpt );
+     gpt_lock_xp = XPTR( local_cxy , &process->vmm.gpt_lock );
+ }
+ else
+ {
+     gpt_xp      = XPTR( ref_cxy , &ref_ptr->vmm.gpt );
+     gpt_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt_lock );
+ }
+
+ // take GPT lock in write mode
+ remote_rwlock_wr_acquire( gpt_lock_xp );
+
+ // get current PTE from the relevant GPT
+ hal_gpt_get_pte( gpt_xp,
+                  vpn,
+                  &old_attr,
+                  &old_ppn );
+
+ // the PTE must be mapped for a COW
+ if( (old_attr & GPT_MAPPED) == 0 )
+ {
+     printk("\n[PANIC] in %s : VPN %x in process %x unmapped\n",
+     __FUNCTION__, vpn, process->pid );
+
+     // release GPT lock in write mode
+     remote_rwlock_wr_release( gpt_lock_xp );
+
+     return EXCP_KERNEL_PANIC;
+ }
+
+ // get extended pointer, cluster and local pointer on physical page descriptor
+ xptr_t   page_xp  = ppm_ppn2page( old_ppn );
+ cxy_t    page_cxy = GET_CXY( page_xp );
+ page_t * page_ptr = GET_PTR( page_xp );
+
+ // get extended pointers on forks and lock field in page descriptor
+ xptr_t forks_xp      = XPTR( page_cxy , &page_ptr->forks );
+ xptr_t forks_lock_xp = XPTR( page_cxy , &page_ptr->lock );
+
+ // take lock protecting "forks" counter
+ remote_busylock_acquire( forks_lock_xp );
+
+ // get number of pending forks from page descriptor
+ uint32_t forks = hal_remote_l32( forks_xp );
+
+ if( forks )        // pending fork => allocate a new page, and copy old to new
+ {
+     // allocate a new physical page
+     page_xp = vmm_page_allocate( vseg , vpn );
+     if( page_xp == XPTR_NULL )
+     {
+         printk("\n[PANIC] in %s : no memory for vpn %x in process %x\n",
+         __FUNCTION__ , vpn, process->pid );
+
+         // release lock protecting "forks" counter
+         remote_busylock_release( forks_lock_xp );
+
+         // release GPT lock in write mode
+         remote_rwlock_wr_release( gpt_lock_xp );
+
+         return EXCP_KERNEL_PANIC;
+     }
+
+     // compute allocated page PPN
+     new_ppn = ppm_page2ppn( page_xp );
+
+     // copy old page content to new page
+     xptr_t old_base_xp = ppm_ppn2base( old_ppn );
+     xptr_t new_base_xp = ppm_ppn2base( new_ppn );
+     memcpy( GET_PTR( new_base_xp ),
+             GET_PTR( old_base_xp ),
+             CONFIG_PPM_PAGE_SIZE );
+
+     // decrement pending forks counter in page descriptor
+     hal_remote_atomic_add( forks_xp , -1 );
+
+ #if(DEBUG_VMM_HANDLE_COW & 1)
+ if( DEBUG_VMM_HANDLE_COW < cycle )
+ printk("\n[DBG] %s : thread[%x,%x] : pending forks => allocate a new PPN %x\n",
+ __FUNCTION__, process->pid, this->trdid, new_ppn );
+ #endif
+
+ }
+ else               // no pending fork => keep the existing page
+ {
+
+ #if(DEBUG_VMM_HANDLE_COW & 1)
+ if( DEBUG_VMM_HANDLE_COW < cycle )
+ printk("\n[DBG] %s : thread[%x,%x] no pending forks => keep existing PPN %x\n",
+ __FUNCTION__, process->pid, this->trdid, new_ppn );
+ #endif
+     new_ppn = old_ppn;
+ }
+
+ // release lock protecting "forks" counter
+ remote_busylock_release( forks_lock_xp );
+
+ // build new_attr : reset COW and set WRITABLE
+ new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);
+
+ // update the relevant GPT
+ // - private vseg => update local GPT
+ // - public vseg => update all GPT copies
+ if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) )
+ {
+     hal_gpt_set_pte( gpt_xp,
+                      vpn,
+                      new_attr,
+                      new_ppn );
+ }
+ else
+ {
+     if( ref_cxy == local_cxy )   // reference cluster is local
+     {
+         vmm_global_update_pte( process,
+                                vpn,
+                                new_attr,
+                                new_ppn );
+     }
+     else                         // reference cluster is remote
+     {
+         rpc_vmm_global_update_pte_client( ref_cxy,
+                                           ref_ptr,
+                                           vpn,
+                                           new_attr,
+                                           new_ppn );
+     }
+ }
+
+ // release GPT lock in write mode
+ remote_rwlock_wr_release( gpt_lock_xp );
+
+ #if DEBUG_VMM_HANDLE_COW
+ cycle = (uint32_t)hal_get_cycles();
+ if( DEBUG_VMM_HANDLE_COW < cycle )
+ printk("\n[DBG] %s : thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
+ __FUNCTION__, process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
+ #endif
+
+ return EXCP_NON_FATAL;
+
+ } // end vmm_handle_cow()
trunk/kernel/mm/vmm.h
r567 → r585:

  /*********************************************************************************************
  * This structure defines the Virtual Memory Manager for a given process in a given cluster.
- * This local VMM provides four main services:
+ * This local VMM implements four main services:
  * 1) It contains the local copy of vseg list (VSL), only complete in reference.
  * 2) It contains the local copy of the generic page table (GPT), only complete in reference.
  …
  *********************************************************************************************
  * Implementation notes:
- * 1. The VSL contains only local vsegs, but it is implemented as an xlist, and protected by
+ * 1. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT
+ *    structures, stored in the reference cluster.
+ * 2. The VSL contains only local vsegs, but it is implemented as an xlist, and protected by
  *    a remote_rwlock, because it can be accessed by a thread running in a remote cluster.
  *    An example is the vmm_fork_copy() function.
- * 2. In most clusters, the VSL and GPT are only partial copies of the reference VSL and GPT
- *    structures, stored in the reference cluster.
+ * 3. The GPT in the reference cluster can be directly accessed by remote threads to handle
+ *    a false page-fault (the page is mapped in the reference GPT, but the PTE copy is
+ *    missing in the local GPT). It is also protected by a remote_rwlock.
  ********************************************************************************************/

  {
  remote_rwlock_t vsegs_lock;  /*! lock protecting the local VSL                  */
- xlist_entry_t   vsegs_root;  /*! VSL root (VSL only complete in reference)      */
+ xlist_entry_t   vsegs_root;  /*! Virtual Segment List (complete in reference)   */
  uint32_t        vsegs_nr;    /*! total number of local vsegs                    */

+ remote_rwlock_t gpt_lock;    /*! lock protecting the local GPT                  */
  gpt_t           gpt;         /*! Generic Page Table (complete in reference)     */
  …
  /*********************************************************************************************
- * This global function modifies a GPT entry identified
+ * This global function modifies a GPT entry identified by the <process> and <vpn>
  * arguments in all clusters containing a process copy.
  * It must be called by a thread running in the reference cluster.
  …
  /*********************************************************************************************
- * This function is called by the generic exception handler in case of page-fault,
- * or copy-on-write event locally detected for a given <vpn> in a given <process>,
- * as defined by the <is_cow> argument.
- * 1) For a Page-Fault:
- *    - If the local cluster is the reference, or for the STACK and CODE segment types,
- *      it calls directly the vmm_get_pte() function to access the local VMM.
- *    - Otherwise, it sends a RPC_VMM_GET_PTE to the reference cluster to get the missing
- *      PTE attributes and PPN.
- *    This function checks that the missing VPN belongs to a registered vseg, allocates
- *    a new physical page if required, and updates the local page table.
- * 2) For a Copy-On-Write:
- *    - If no pending fork, it resets the COW flag and sets the WRITE flag in the reference
- *      GPT entry, and in all the GPT copies.
- *    - If there is a pending fork, it allocates a new physical page from the cluster defined
- *      by the vseg type, copies the old physical page content to the new physical page,
- *      and decrements the pending_fork counter in the old physical page descriptor.
+ * This function is called by the generic exception handler in case of page-fault event,
+ * detected for a given <vpn> in a given <process> in any cluster.
+ * It checks the missing VPN and returns a user error if it is not in a registered vseg.
+ * For a legal VPN, there are actually 3 cases:
+ * 1) If the missing VPN belongs to a private vseg (STACK or CODE segment types, not
+ *    replicated in all clusters), it allocates a new physical page, computes the attributes
+ *    depending on the vseg type, and directly updates the local GPT.
+ * 2) If the missing VPN belongs to a public vseg, it can be a false page-fault, when the VPN
+ *    is mapped in the reference GPT, but not in the local GPT. For this false page-fault,
+ *    the local GPT is simply updated from the reference GPT.
+ * 3) If the missing VPN is public, and unmapped in the reference GPT, it is a true page
+ *    fault. The calling thread allocates a new physical page, computes the attributes
+ *    depending on the vseg type, and directly (without RPC) updates the local GPT and the
+ *    reference GPT. Other GPT copies will be updated on demand.
+ * In the three cases, concurrent accesses to the GPT are handled, thanks to the
+ * remote_rwlock protecting each GPT copy.
  *********************************************************************************************
  * @ process : pointer on local process descriptor copy.
- * @ vpn     : VPN of the missing or faulting PTE.
- * @ is_cow  : Copy-On-Write event if true / Page-fault if false.
- * @ returns 0 if success / returns ENOMEM if no memory or illegal VPN.
+ * @ vpn     : VPN of the missing PTE.
+ * @ returns EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC after analysis
  ********************************************************************************************/
  error_t vmm_handle_page_fault( struct process_s * process,
-                                vpn_t              vpn,
-                                bool_t             is_cow );
-
- /*********************************************************************************************
- * This function is called by vmm_handle_page_fault() to handle both the "page-fault"
- * and the "copy-on-write" events for a given <vpn> in a given <process>, as defined
- * by the <is_cow> argument.
- * The vseg containing the searched VPN must be registered in the reference VMM.
- * - For a page-fault, it allocates the missing physical page from the target cluster
- *   defined by the vseg type, initializes it, and updates the reference GPT, but not
- *   the GPT copies, which will be updated on demand.
- * - For a copy-on-write, it allocates a new physical page from the target cluster,
- *   initialises it from the old physical page, and updates the reference GPT and all
- *   the GPT copies, for coherence.
- * It calls RPC_PMEM_GET_PAGES to get the new physical page when the target cluster
- * is not the local cluster.
- * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
- *********************************************************************************************
- * @ process : [in] pointer on process descriptor.
- * @ vpn     : [in] VPN defining the missing PTE.
- * @ is_cow  : [in] "copy_on_write" if true / "page_fault" if false.
- * @ attr    : [out] PTE attributes.
- * @ ppn     : [out] PTE ppn.
- * @ returns 0 if success / returns ENOMEM if error.
- ********************************************************************************************/
- error_t vmm_get_pte( struct process_s * process,
-                      vpn_t              vpn,
-                      bool_t             is_cow,
-                      uint32_t         * attr,
-                      ppn_t            * ppn );
+                                vpn_t              vpn );
+
+ /*********************************************************************************************
+ * This function is called by the generic exception handler in case of copy-on-write event,
+ * detected for a given <vpn> in a given <process> in any cluster.
+ * It returns a kernel panic if the VPN is not in a registered vseg, or is not mapped.
+ * For a legal mapped vseg there are two cases:
+ * 1) If the faulting VPN belongs to a private vseg (STACK or CODE segment types, not
+ *    replicated in all clusters), it accesses the local GPT to get the current PPN and
+ *    ATTR, and accesses the forks counter in the current physical page descriptor.
+ *    If there is a pending fork, it allocates a new physical page from the cluster defined
+ *    by the vseg type, copies the old physical page content to the new physical page,
+ *    and decrements the pending_fork counter in the old physical page descriptor.
+ *    Finally, it resets the COW flag and sets the WRITE flag in the local GPT.
+ * 2) If the faulting VPN is public, it accesses the reference GPT to get the current PPN
+ *    and ATTR, and accesses the forks counter in the current physical page descriptor.
+ *    If there is a pending fork, it allocates a new physical page from the cluster defined
+ *    by the vseg type, copies the old physical page content to the new physical page,
+ *    and decrements the pending_fork counter in the old physical page descriptor.
+ *    Finally it calls the vmm_global_update_pte() function to reset the COW flag and set
+ *    the WRITE flag in all the GPT copies, using an RPC if the reference cluster is remote.
+ * In both cases, concurrent accesses to the GPT are handled, thanks to the
+ * remote_rwlock protecting each GPT copy.
+ *********************************************************************************************
+ * @ process : pointer on local process descriptor copy.
+ * @ vpn     : VPN of the faulting PTE.
+ * @ returns EXCP_NON_FATAL / EXCP_USER_ERROR / EXCP_KERNEL_PANIC after analysis
+ ********************************************************************************************/
+ error_t vmm_handle_cow( struct process_s * process,
+                         vpn_t              vpn );

  /*********************************************************************************************
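Both new handlers rely on the same concurrency idiom, made possible by the gpt_lock field added to vmm_t: re-read the PTE after taking the GPT lock in write mode, and install the mapping only if the VPN is still unmapped, since a concurrent fault on another core may have won the race. A condensed sketch follows; it uses the changeset's identifiers, but the helper name map_vpn_once is ours and does not exist in the source tree.

    // Condensed sketch of the "re-check under the write lock" idiom used by
    // vmm_handle_page_fault() and vmm_handle_cow(); the helper name is ours.
    static error_t map_vpn_once( xptr_t   gpt_xp,   // extended pointer on a GPT
                                 xptr_t   lock_xp,  // remote_rwlock protecting it
                                 vpn_t    vpn,
                                 uint32_t attr,     // attributes to install
                                 ppn_t    ppn )     // PPN to install
    {
        uint32_t cur_attr;
        ppn_t    cur_ppn;
        error_t  error = 0;

        // serialize all writers on this GPT copy
        remote_rwlock_wr_acquire( lock_xp );

        // re-read the PTE : a concurrent fault may have mapped it already
        hal_gpt_get_pte( gpt_xp , vpn , &cur_attr , &cur_ppn );

        // only the winner of the race installs the mapping
        if( (cur_attr & GPT_MAPPED) == 0 )
        {
            error = hal_gpt_set_pte( gpt_xp , vpn , attr , ppn );
        }

        remote_rwlock_wr_release( lock_xp );

        return error;
    }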