Changeset 469 for trunk/kernel/mm
- Timestamp: Aug 20, 2018, 1:04:16 PM
- Location: trunk/kernel/mm
- Files: 3 edited
Legend: in the diffs below, lines prefixed with "-" were removed in r469, lines prefixed with "+" were added, and unprefixed lines are unmodified context. A "..." line marks elided source lines; hunk headers of the form @@ -N +M @@ give the starting line number in r457 and r469 respectively.
trunk/kernel/mm/page.h (r457 → r469)

@@ -56 +56 @@
  * This structure defines a physical page descriptor.
  * Size is 64 bytes for a 32 bits core...
+ * The spinlock is used to test/modify the forks counter.
  * TODO : the list of waiting threads seems to be unused [AG]
- * TODO : the spinlock use has to be clarified [AG]
+ * TODO : the refcount use has to be clarified
  ************************************************************************************/

@@ -70 +71 @@
     uint32_t    refcount;      /*! reference counter                        (4) */
     uint32_t    forks;         /*! number of pending forks                  (4) */
-    spinlock_t  lock;          /*! To Be Defined [AG]                       (16)*/
+    spinlock_t  lock;          /*! protect the forks field                  (4) */
 }
 page_t;
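The net effect of the page.h change is a documented locking rule: the "forks" counter of a page descriptor may only be tested or modified with the page spinlock held. A minimal sketch of that access pattern (not kernel source), using only the primitives that appear in the vmm.c diff below; "page_cxy" and "page_ptr" stand for the cluster and local pointer of any page descriptor:

    // build extended pointers on the remote "forks" and "lock" fields
    xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

    remote_spinlock_lock( lock_xp );              // serialize all test/modify accesses

    uint32_t forks = hal_remote_lw( forks_xp );   // read the pending forks counter
    if( forks ) hal_remote_atomic_add( forks_xp , -1 );  // e.g. test-and-decrement

    remote_spinlock_unlock( lock_xp );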
trunk/kernel/mm/vmm.c (r457 → r469)

@@ -411 +411 @@ vmm_global_update_pte()
     page_t    * page_ptr;
     xptr_t      forks_xp;
+    xptr_t      lock_xp;

     // update flags in remote GPT
...
     if( attr & GPT_MAPPED )
     {
+        // get pointers and cluster on page descriptor
         page_xp  = ppm_ppn2page( ppn );
         page_cxy = GET_CXY( page_xp );
         page_ptr = GET_PTR( page_xp );
+
+        // get extended pointers on "forks" and "lock"
         forks_xp = XPTR( page_cxy , &page_ptr->forks );
+        lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+        // increment "forks"
+        remote_spinlock_lock( lock_xp );
         hal_remote_atomic_add( forks_xp , 1 );
+        remote_spinlock_unlock( lock_xp );
     }
 }  // end loop on vpn

@@ -473 +482 @@ vmm_fork_copy()
     vpn_t       vpn_base;
     vpn_t       vpn_size;
-    xptr_t      page_xp;
+    xptr_t      page_xp;         // extended pointer on page descriptor
     page_t    * page_ptr;
     cxy_t       page_cxy;
+    xptr_t      forks_xp;        // extended pointer on forks counter in page descriptor
+    xptr_t      lock_xp;         // extended pointer on lock protecting the forks counter
     xptr_t      parent_root_xp;
     bool_t      mapped;

@@ -592 +603 @@ vmm_fork_copy()
     if( mapped )
     {
-        page_xp = ppm_ppn2page( ppn );
+        // get pointers and cluster on page descriptor
+        page_xp  = ppm_ppn2page( ppn );
         page_cxy = GET_CXY( page_xp );
         page_ptr = GET_PTR( page_xp );
-        hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , 1 );
+
+        // get extended pointers on "forks" and "lock"
+        forks_xp = XPTR( page_cxy , &page_ptr->forks );
+        lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+        // increment "forks"
+        remote_spinlock_lock( lock_xp );
+        hal_remote_atomic_add( forks_xp , 1 );
+        remote_spinlock_unlock( lock_xp );

 #if DEBUG_VMM_FORK_COPY
...
 __FUNCTION__ , CURRENT_THREAD , vpn , cycle );
 #endif
-
     }
     }

@@ -670 +689 @@ vmm_destroy()
 if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : thread %x enter for process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD , process->pid , local_cxy, cycle );
+__FUNCTION__, CURRENT_THREAD->trdid, process->pid, local_cxy, cycle );
 #endif
...
     vseg = GET_PTR( vseg_xp );

-#if( DEBUG_VMM_DESTROY & 1 )
-if( DEBUG_VMM_DESTROY < cycle )
-printk("\n[DBG] %s : found %s vseg / vpn_base %x / vpn_size %d\n",
-__FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
-#endif
     // unmap and release physical pages
     vmm_unmap_vseg( process , vseg );

@@ -751 +765 @@ vmm_destroy()
 if( DEBUG_VMM_DESTROY < cycle )
 printk("\n[DBG] %s : thread %x exit for process %x in cluster %x / cycle %d\n",
-__FUNCTION__ , CURRENT_THREAD , process->pid, local_cxy , cycle );
+__FUNCTION__, CURRENT_THREAD->trdid, process->pid, local_cxy , cycle );
 #endif

@@ -1069 +1083 @@ vmm_unmap_vseg()
     page_t    * page_ptr;    // page descriptor pointer
     xptr_t      forks_xp;    // extended pointer on pending forks counter
-    uint32_t    count;       // actual number of pendinf forks
+    xptr_t      lock_xp;     // extended pointer on lock protecting forks counter
+    uint32_t    forks;       // actual number of pendinf forks

 #if DEBUG_VMM_UNMAP_VSEG
...
     page_ptr = GET_PTR( page_xp );

-    // FIXME lock the physical page
+    // get extended pointers on forks and lock fields
+    forks_xp = XPTR( page_cxy , &page_ptr->forks );
+    lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+    // get lock protecting page descriptor
+    remote_spinlock_lock( lock_xp );

     // get pending forks counter
-    count = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ));
+    forks = hal_remote_lw( forks_xp );

-    if( count )  // decrement pending forks counter
+    if( forks )  // decrement pending forks counter
     {
-        forks_xp = XPTR( page_cxy , &page_ptr->forks );
         hal_remote_atomic_add( forks_xp , -1 );
     }
...
     }

-    // FIXME unlock the physical page
+    // release lock protecting page descriptor
+    remote_spinlock_unlock( lock_xp );
     }
 }

@@ -1418 +1438 @@ vmm_get_one_ppn()
 #if DEBUG_VMM_GET_ONE_PPN
 thread_t * this = CURRENT_THREAD;
-// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x enter for vpn = %x / type = %s / index = %d\n",
 __FUNCTION__, this, vpn, vseg_type_str(type), index );

@@ -1482 +1501 @@ vmm_get_one_ppn()
 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
-// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x for vpn = %x / elf_offset = %x\n",
 __FUNCTION__, this, vpn, elf_offset );

@@ -1499 +1517 @@ vmm_get_one_ppn()
 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread%x for vpn = %x / fully in BSS\n",
 __FUNCTION__, this, vpn );

@@ -1519 +1536 @@ vmm_get_one_ppn()
 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x, for vpn = %x / fully in mapper\n",
 __FUNCTION__, this, vpn );

@@ -1551 +1567 @@ vmm_get_one_ppn()
 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x for vpn = %x / both mapper & BSS\n"
 "     %d bytes from mapper / %d bytes from BSS\n",

@@ -1599 +1614 @@ vmm_get_one_ppn()
 #if DEBUG_VMM_GET_ONE_PPN
-// if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
+if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
 printk("\n[DBG] %s : thread %x exit for vpn = %x / ppn = %x\n",
 __FUNCTION__ , this , vpn , *ppn );

@@ -1628 +1642 @@ vmm_get_pte()
 #if DEBUG_VMM_GET_PTE
 uint32_t   cycle = (uint32_t)hal_get_cycles();
-// if( DEBUG_VMM_GET_PTE < cycle )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
-printk("\n[DBG] %s : thread %x enter / vpn %x / process %x / cow %d / cycle %d\n",
-__FUNCTION__ , this , vpn , process->pid , cow , cycle );
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x in process %x enter / vpn %x / cow %d / cycle %d\n",
+__FUNCTION__, this->trdid, process->pid, vpn, cow, cycle );
 #endif

@@ -1644 +1657 @@ vmm_get_pte()
 // vseg has been checked by the vmm_handle_page_fault() function
 assert( (vseg != NULL) , __FUNCTION__,
-"vseg undefined / vpn %x / thread %x /process %x / core[%x,%d] / cycle %d\n",
-vpn, this , process->pid, local_cxy, this->core->lid,
+"vseg undefined / vpn %x / thread %x in process %x / core[%x,%d] / cycle %d\n",
+vpn, this->trdid, process->pid, local_cxy, this->core->lid,
 (uint32_t)hal_get_cycles() );

@@ -1659 +1672 @@ vmm_get_pte()
 assert( (old_attr & GPT_MAPPED), __FUNCTION__,
-"PTE unmapped for a COW exception / vpn %x / thread %x /process %x / cycle %d\n",
+"PTE unmapped for a COW exception / vpn %x / thread %x in process %x / cycle %d\n",
 vpn, this, process->pid, (uint32_t)hal_get_cycles() );

 #if( DEBUG_VMM_GET_PTE & 1 )
-// if( DEBUG_VMM_GET_PTE < cycle )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
-printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
-__FUNCTION__, this, vpn, process->pid );
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x in process %x handling COW for vpn %x\n",
+__FUNCTION__, this->trdid, process->pid, vpn );
 #endif
...
 page_t * page_ptr = GET_PTR( page_xp );

+// get extended pointers on forks and lock field in page descriptor
+xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
+xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+
+// take lock protecting page descriptor
+remote_spinlock_lock( lock_xp );
+
 // get number of pending forks in page descriptor
-uint32_t forks = hal_remote_lw( XPTR( page_cxy , &page_ptr->forks ));
+uint32_t forks = hal_remote_lw( forks_xp );

 if( forks )   // pending fork => allocate a new page, copy old to new
...
 GET_PTR( old_base_xp ),
 CONFIG_PPM_PAGE_SIZE );
+
+// decrement pending forks counter in page descriptor
+hal_remote_atomic_add( forks_xp , -1 );
 }
-else          // no pending fork => keep the existing page , reset COW
+else          // no pending fork => keep the existing page
 {
     new_ppn = old_ppn;
 }

+// release lock protecting page descriptor
+remote_spinlock_unlock( lock_xp );
+
 // build new_attr : reset COW and set WRITABLE,
 new_attr = (old_attr | GPT_WRITABLE) & (~GPT_COW);

 // update GPT[vpn] for all GPT copies
 vmm_global_update_pte( process, vpn, new_attr, new_ppn );
-
-// decrement pending forks counter in page descriptor
-hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->forks ) , -1 );
 }
 else   //////////// page_fault request ///////////////////////////

@@ -1725 +1746 @@ vmm_get_pte()
 #if( DEBUG_VMM_GET_PTE & 1 )
-// if( DEBUG_VMM_GET_PTE < cycle )
-if( (vpn == 0x403) && ((local_cxy == 0) || (this->type == THREAD_RPC)) )
-printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
-__FUNCTION__, this, vpn, process->pid );
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x in process %x handling page fault for vpn %x\n",
+__FUNCTION__, this->trdid, process->pid, vpn );
 #endif
 // allocate new_ppn, and initialize the new page

@@ -1767 +1788 @@ vmm_get_pte()
 #if DEBUG_VMM_GET_PTE
 cycle = (uint32_t)hal_get_cycles();
-// if( DEBUG_VMM_GET_PTE < cycle )
-if( (vpn == 0x403) && (local_cxy == 0) )
-printk("\n[DBG] %s : thread %x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
-__FUNCTION__, this, vpn, process->pid, new_ppn, new_attr, cycle );
+if( DEBUG_VMM_GET_PTE < cycle )
+printk("\n[DBG] %s : thread %x in process %x exit / vpn %x / ppn %x / attr %x / cycle %d\n",
+__FUNCTION__, this->trdid, process->pid, vpn, new_ppn, new_attr, cycle );
 #endif

@@ -1797 +1817 @@ vmm_handle_page_fault()
 #if DEBUG_VMM_HANDLE_PAGE_FAULT
 uint32_t cycle = (uint32_t)hal_get_cycles();
-// if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
-if( (vpn == 0x403) && (local_cxy == 0) )
+if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
 printk("\n[DBG] %s : thread %x in process %x enter for vpn %x / core[%x,%d] / cycle %d\n",
 __FUNCTION__, this, process->pid, vpn, local_cxy, this->core->lid, cycle );

@@ -1836 +1855 @@ vmm_handle_page_fault()
 #if DEBUG_VMM_HANDLE_PAGE_FAULT
-// if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
-if( (vpn == 0x403) && (local_cxy == 0) )
+if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
 printk("\n[DBG] %s : thread %x in process %x call RPC_VMM_GET_PTE\n",
 __FUNCTION__, this, process->pid );

@@ -1870 +1888 @@ vmm_handle_page_fault()
 #if DEBUG_VMM_HANDLE_PAGE_FAULT
 cycle = (uint32_t)hal_get_cycles();
-// if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
-if( (vpn == 0x403) && (local_cxy == 0) )
+if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
 printk("\n[DBG] %s : thread %x in process %x exit for vpn %x / core[%x,%d] / cycle %d\n",
 __FUNCTION__, this, process->pid, vpn, local_cxy, this->core->lid, cycle );
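The recurring pattern above replaces the old FIXME comments and bare atomic updates with a lock-protected critical section. The apparent motivation: in the COW path the action taken (copy the page vs. reuse it) depends on the value of "forks" read just before, so a concurrent decrement between the read and the action must be excluded; an atomic add alone cannot make the test and the update one unit. The following standalone C program (an illustration written for this note with POSIX threads, not ALMOS-MKH code) shows the same test-and-decrement discipline:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t        forks = 1;      /* one pending fork */

    /* what vmm_unmap_vseg() conceptually does for one mapped page */
    static void * unmap_side( void * arg )
    {
        (void)arg;
        pthread_mutex_lock( &lock );
        if( forks ) forks--;               /* test-and-decrement as one unit */
        pthread_mutex_unlock( &lock );
        return NULL;
    }

    /* what the COW path of vmm_get_pte() conceptually does */
    static void * cow_side( void * arg )
    {
        (void)arg;
        pthread_mutex_lock( &lock );
        if( forks )                        /* pending fork => would copy the page */
        {
            /* ...allocate a new page and copy the old one here... */
            forks--;
        }                                  /* else: reuse the existing page */
        pthread_mutex_unlock( &lock );
        return NULL;
    }

    int main( void )
    {
        pthread_t t1, t2;
        pthread_create( &t1, NULL, unmap_side, NULL );
        pthread_create( &t2, NULL, cow_side, NULL );
        pthread_join( t1, NULL );
        pthread_join( t2, NULL );
        /* with the lock, forks never underflows; without it, both threads
           could read forks == 1 and both decrement, wrapping to 0xFFFFFFFF */
        printf( "forks = %u\n", (unsigned)forks );
        return 0;
    }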
trunk/kernel/mm/vmm.h (r457 → r469)

@@ -166 +166 @@
  * valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
  * - no STACK vseg is copied from parent VMM to child VMM, because the child STACK vseg
- *   must be copied from the cluster containing the user thread requesting the fork().
+ *   must be copied later from the cluster containing the user thread requesting the fork().
  *********************************************************************************************
 * @ child_process : local pointer on local child process descriptor.
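To make the "copied later" remark concrete, here is a hypothetical caller-side sketch of the fork path. Only the child_process parameter is documented in the fragment above; the parent_process_xp argument, the error_t return, and the error-handling convention are assumptions for illustration:

    // hypothetical sketch, executed in the cluster of the thread calling fork()
    error_t error = vmm_fork_copy( child_process ,       // documented parameter
                                   parent_process_xp );  // assumed: extended pointer
                                                         // on the parent process
    if( error ) return error;     // assumed: caller releases the half-built VMM

    // per the updated comment, the child has no STACK vseg at this point:
    // it is created afterwards, from the cluster containing the forking thread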