Changeset 440 for trunk/kernel/mm


Ignore:
Timestamp:
May 3, 2018, 5:51:22 PM (7 years ago)
Author:
alain
Message:

1/ Fix a bug in the Multithreaded "sort" application:
The pthread_create() arguments must be declared as global variables.
2/ The exit syscall can be called by any thread of a process.

Location:
trunk/kernel/mm
Files:
6 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/mapper.c

    r438 r440  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016)
     5 *           Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/kernel/mm/mapper.h

    r407 r440  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016)
     5 *           Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
  • trunk/kernel/mm/vmm.c

    r438 r440  
    198198                  bool_t      mapping )
    199199{
    200     assert( (process->ref_xp == XPTR( local_cxy , process )) , __FUNCTION__,
    201     "this function must be executed in reference cluster" );
    202 
    203200    vmm_t * vmm = &process->vmm;
    204201    gpt_t * gpt = &vmm->gpt;
    205202
    206     printk("\n***** VSL and GPT for process %x\n\n",
    207     process->pid );
     203    printk("\n***** VSL and GPT for process %x in cluster %x\n\n",
     204    process->pid , local_cxy );
    208205
    209206    // get lock protecting the vseg list
     
    10361033}  // end vmm_remove_vseg()
    10371034
    1038 //////////////////////////////////////////////
    1039 error_t vmm_map_kernel_vseg( vseg_t    * vseg,
    1040                              uint32_t    attr )
    1041 {
    1042     vpn_t       vpn;        // VPN of PTE to be set
    1043     vpn_t       vpn_min;    // VPN of first PTE to be set
    1044     vpn_t       vpn_max;    // VPN of last PTE to be set (excluded)
    1045         ppn_t       ppn;        // PPN of allocated physical page
    1046         uint32_t    order;      // ln( number of small pages for one single PTE )
    1047         page_t    * page;
    1048     error_t     error;
    1049 
    1050     // check vseg type : must be a kernel vseg
    1051     uint32_t type = vseg->type;
    1052     assert( ((type==VSEG_TYPE_KCODE) || (type==VSEG_TYPE_KDATA) || (type==VSEG_TYPE_KDEV)),
    1053             __FUNCTION__ , "not a kernel vseg\n" );
    1054 
    1055     // get pointer on page table
    1056     gpt_t * gpt = &process_zero.vmm.gpt;
    1057 
    1058     // define number of small pages per PTE
    1059         if( attr & GPT_SMALL ) order = 0;   // 1 small page
    1060         else                   order = 9;   // 512 small pages
    1061 
    1062     // loop on pages in vseg
    1063     vpn_min = vseg->vpn_base;
    1064     vpn_max = vpn_min + vseg->vpn_size;
    1065         for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    1066         {
    1067         // allocate a physical page from local PPM
    1068             kmem_req_t req;
    1069             req.type  = KMEM_PAGE;
    1070             req.size  = order;
    1071             req.flags = AF_KERNEL | AF_ZERO;
    1072             page      = (page_t *)kmem_alloc( &req );
    1073                 if( page == NULL )
    1074         {
    1075             printk("\n[ERROR] in %s : cannot allocate physical memory\n", __FUNCTION__ );
    1076             return ENOMEM;
    1077         }
    1078 
    1079         // set page table entry
    1080         ppn = ppm_page2ppn( XPTR( local_cxy , page ) );
    1081         error = hal_gpt_set_pte( gpt,
    1082                                  vpn,
    1083                                  attr,
    1084                                  ppn );
    1085                 if( error )
    1086         {
    1087             printk("\n[ERROR] in %s : cannot register PPE\n", __FUNCTION__ );
    1088             return ENOMEM;
    1089         }
    1090         }
    1091 
    1092         return 0;
    1093 
    1094 }  // end vmm_map_kernel_vseg()
    1095 
    10961035/////////////////////////////////////////
    10971036void vmm_unmap_vseg( process_t * process,
     
    11931132
    11941133//////////////////////////////////////////////////////////////////////////////////////////
    1195 // This low-level static function is called by the vmm_get_vseg() and vmm_resize_vseg()
    1196 // functions.  It scan the list of registered vsegs to find the unique vseg containing
    1197 // a given virtual address.
     1134// This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(),
     1135// and vmm_resize_vseg() functions.  It scan the local VSL to find the unique vseg
     1136// containing a given virtual address.
    11981137//////////////////////////////////////////////////////////////////////////////////////////
    11991138// @ vmm     : pointer on the process VMM.
     
    13311270                       vseg_t   ** found_vseg )
    13321271{
    1333     vmm_t  * vmm = &process->vmm;
    1334 
    1335     // get vseg from vaddr
    1336     vseg_t * vseg = vseg_from_vaddr( vmm , vaddr );
     1272    xptr_t   vseg_xp;
     1273    error_t  error;
     1274    vseg_t * vseg;
     1275    vmm_t  * vmm;
     1276
     1277    // get pointer on local VMM
     1278    vmm = &process->vmm;
     1279
     1280    // try to get vseg from local VMM
     1281    vseg = vseg_from_vaddr( vmm , vaddr );
    13371282
    13381283    if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
     
    13481293
    13491294        // get extended pointer on reference vseg
    1350         xptr_t   vseg_xp;
    1351         error_t  error;
    1352 
    13531295        rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
    13541296           
    1355         if( error )   return -1;       // vseg not found => illegal user vaddr
     1297        if( error )   return -1;                // vseg not found => illegal user vaddr
    13561298       
    13571299        // allocate a vseg in local cluster
    13581300        vseg = vseg_alloc();
    13591301
    1360         if( vseg == NULL ) return -1;
     1302        if( vseg == NULL ) return -1;           // cannot allocate a local vseg
    13611303
    13621304        // initialise local vseg from reference
     
    14961438
    14971439        // initialise missing page from .elf file mapper for DATA and CODE types
    1498         // (the vseg->mapper_xp field is an extended pointer on the .elf file mapper)
     1440        // the vseg->mapper_xp field is an extended pointer on the .elf file mapper
    14991441        if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
    15001442        {
     
    15211463#endif
    15221464
     1465
    15231466            // compute extended pointer on page base
    15241467            xptr_t base_xp  = ppm_page2base( page_xp );
     
    15351478__FUNCTION__, CURRENT_THREAD, vpn );
    15361479#endif
     1480
    15371481
    15381482                if( GET_CXY( page_xp ) == local_cxy )
     
    15531497__FUNCTION__, CURRENT_THREAD, vpn );
    15541498#endif
    1555 
    15561499                if( mapper_cxy == local_cxy )
    15571500                {
     
    16441587                     ppn_t     * ppn )
    16451588{
    1646     vseg_t  * vseg;       // vseg containing VPN
    1647     ppn_t     old_ppn;    // current PTE_PPN
    1648     uint32_t  old_attr;   // current PTE_ATTR
    1649     ppn_t     new_ppn;    // new PTE_PPN
    1650     uint32_t  new_attr;   // new PTE_ATTR
    1651     error_t   error;
    1652 
    1653     // this function must be called by a thread running in the reference cluster
    1654     assert( (GET_CXY( process->ref_xp ) == local_cxy ) , __FUNCTION__ ,
    1655     "not called in the reference cluster\n" );
     1589    ppn_t      old_ppn;    // current PTE_PPN
     1590    uint32_t   old_attr;   // current PTE_ATTR
     1591    ppn_t      new_ppn;    // new PTE_PPN
     1592    uint32_t   new_attr;   // new PTE_ATTR
     1593    vmm_t    * vmm;
     1594    vseg_t   * vseg;     
     1595    error_t    error;
    16561596
    16571597#if DEBUG_VMM_GET_PTE
     
    16631603
    16641604    // get VMM pointer
    1665     vmm_t * vmm = &process->vmm;
    1666 
    1667     // get vseg pointer from reference VSL
    1668     error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
    1669 
    1670     if( error )
    1671     {
    1672         printk("\n[ERROR] in %s : out of segment / process = %x / vpn = %x\n",
    1673         __FUNCTION__ , process->pid , vpn );
    1674         return error;
    1675     }
    1676 
    1677 #if( DEBUG_VMM_GET_PTE & 1 )
    1678 cycle = (uint32_t)hal_get_cycles();
    1679 if( DEBUG_VMM_GET_PTE < cycle )
    1680 printk("\n[DBG] %s : thread %x found vseg %s / vpn_base = %x / vpn_size = %x\n",
    1681 __FUNCTION__, CURRENT_THREAD, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size );
    1682 #endif
     1605    vmm = &process->vmm;
     1606
     1607    // get local vseg descriptor
     1608    error =  vmm_get_vseg( process,
     1609                           ((intptr_t)vpn << CONFIG_PPM_PAGE_SHIFT),
     1610                           &vseg );
     1611
     1612    // vseg has been checked by the vmm_handle_page_fault() function
     1613    assert( (vseg != NULL) , __FUNCTION__,
     1614    "vseg undefined / vpn %x / thread %x / process %x / core[%x,%d] / cycle %d\n",
     1615    vpn, CURRENT_THREAD, process->pid, local_cxy, CURRENT_THREAD->core->lid,
     1616    (uint32_t)hal_get_cycles() );
    16831617
    16841618    if( cow )  //////////////// copy_on_write request //////////////////////
    1685                // get PTE from reference GPT
     1619               // get PTE from local GPT
    16861620               // allocate a new physical page if there is pending forks,
    16871621               // initialize it from old physical page content,
    16881622               // update PTE in all GPT copies,
    16891623    {
    1690         // access GPT to get current PTE attributes and PPN
     1624        // access local GPT to get current PTE attributes and PPN
    16911625        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
    16921626
    1693         assert( (old_attr & GPT_MAPPED) , __FUNCTION__ ,
    1694         "PTE must be mapped for a copy-on-write exception\n" );
     1627        assert( (old_attr & GPT_MAPPED), __FUNCTION__,
     1628        "PTE unmapped for a COW exception / vpn %x / thread %x / process %x / cycle %d\n",
     1629        vpn, CURRENT_THREAD, process->pid, (uint32_t)hal_get_cycles() );
    16951630
    16961631#if( DEBUG_VMM_GET_PTE & 1 )
    1697 cycle = (uint32_t)hal_get_cycles();
    16981632if( DEBUG_VMM_GET_PTE < cycle )
    16991633printk("\n[DBG] %s : thread %x handling COW for vpn %x in process %x\n",
     
    17451679    }
    17461680    else        //////////// page_fault request ///////////////////////////
    1747                 // get PTE from reference GPT
     1681                // get PTE from local GPT
    17481682                // allocate a physical page if it is a true page fault,
     1683                // initialize it if type is FILE, CODE, or DATA,
    17491684                // register in reference GPT, but don't update GPT copies
    17501685    { 
    1751         // access GPT to get current PTE
     1686        // access local GPT to get current PTE
    17521687        hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );
    17531688
     
    17561691
    17571692#if( DEBUG_VMM_GET_PTE & 1 )
    1758 cycle = (uint32_t)hal_get_cycles();
    17591693if( DEBUG_VMM_GET_PTE < cycle )
    17601694printk("\n[DBG] %s : thread %x handling page fault for vpn %x in process %x\n",
    17611695__FUNCTION__, CURRENT_THREAD, vpn, process->pid );
    17621696#endif
    1763 
    1764             // allocate new_ppn, depending on vseg type
     1697            // allocate new_ppn, and initialize the new page
    17651698            error = vmm_get_one_ppn( vseg , vpn , &new_ppn );
    17661699            if( error )
     
    18011734cycle = (uint32_t)hal_get_cycles();
    18021735if( DEBUG_VMM_GET_PTE < cycle )
    1803 printk("\n[DBG] %s : thread,%x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
     1736printk("\n[DBG] %s : thread %x exit / vpn %x in process %x / ppn %x / attr %x / cycle %d\n",
    18041737__FUNCTION__, CURRENT_THREAD, vpn, process->pid, new_ppn, new_attr, cycle );
    18051738#endif
     
    18141747///////////////////////////////////////////////////
    18151748error_t vmm_handle_page_fault( process_t * process,
    1816                                vpn_t       vpn )
     1749                               vpn_t       vpn,
     1750                               bool_t      is_cow )
    18171751{
    18181752    uint32_t         attr;          // missing page attributes
    18191753    ppn_t            ppn;           // missing page PPN
     1754    vseg_t         * vseg;          // vseg containing vpn
     1755    uint32_t         type;          // vseg type
     1756    cxy_t            ref_cxy;       // reference cluster for missing vpn
     1757    process_t      * ref_ptr;       // reference process for missing vpn
    18201758    error_t          error;
    18211759
    1822 #if DEBUG_VMM_GET_PTE
     1760    thread_t       * this = CURRENT_THREAD;
     1761
     1762#if DEBUG_VMM_HANDLE_PAGE_FAULT
    18231763uint32_t cycle = (uint32_t)hal_get_cycles();
    1824 if( DEBUG_VMM_GET_PTE < cycle )
    1825 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
    1826 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
    1827 #endif
     1764if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
     1765printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / core[%x,%d] / cycle %d\n",
     1766__FUNCTION__, this, vpn, process->pid, local_cxy, this->core->lid, cycle );
     1767#endif
     1768
     1769    // get local vseg (access reference VSL if required)
     1770    error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
     1771
     1772    if( error )
     1773    {
     1774        printk("\n[ERROR] in %s : vpn %x / process %x / thread %x / core[%x,%d] / cycle %d\n",
     1775        __FUNCTION__, vpn, process->pid, this->trdid, local_cxy, this->core->lid,
     1776        (uint32_t)hal_get_cycles() );
     1777        return error;
     1778    }
     1779
     1780    // get segment type
     1781    type = vseg->type;
    18281782
    18291783    // get reference process cluster and local pointer
    1830     cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1831     process_t * ref_ptr = GET_PTR( process->ref_xp );
    1832 
    1833     // get missing PTE attributes and PPN from reference cluster
     1784    // for private vsegs (CODE and DATA type),
     1785    // the reference is the local process descriptor.
     1786    if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )
     1787    {
     1788        ref_cxy = local_cxy;
     1789        ref_ptr = process;
     1790    }
     1791    else
     1792    {
     1793        ref_cxy = GET_CXY( process->ref_xp );
     1794        ref_ptr = GET_PTR( process->ref_xp );
     1795    }
     1796
     1797    // get missing PTE attributes and PPN
    18341798    if( local_cxy != ref_cxy ) 
    18351799    {
     
    18371801                                ref_ptr,
    18381802                                vpn,
    1839                                 false,    // page_fault
     1803                                is_cow,
    18401804                                &attr,
    18411805                                &ppn,
     
    18551819        error = vmm_get_pte( process,
    18561820                             vpn,
    1857                              false,      // page-fault
     1821                             is_cow,
    18581822                             &attr,
    18591823                             &ppn );
    18601824    }
    18611825
    1862 #if DEBUG_VMM_GET_PTE
     1826#if DEBUG_VMM_HANDLE_PAGE_FAULT
    18631827cycle = (uint32_t)hal_get_cycles();
    1864 if( DEBUG_VMM_GET_PTE < cycle )
     1828if( DEBUG_VMM_HANDLE_PAGE_FAULT < cycle )
    18651829printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
    1866 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
     1830__FUNCTION__, this->trdid, vpn, process->pid, cycle );
    18671831#endif
    18681832
     
    18711835}  // end vmm_handle_page_fault()
    18721836
    1873 ////////////////////////////////////////////
    1874 error_t vmm_handle_cow( process_t * process,
    1875                         vpn_t       vpn )
    1876 {
    1877     uint32_t         attr;          // page attributes
    1878     ppn_t            ppn;           // page PPN
    1879     error_t          error;
    1880 
    1881 #if DEBUG_VMM_GET_PTE
    1882 uint32_t cycle = (uint32_t)hal_get_cycles();
    1883 if( DEBUG_VMM_GET_PTE < cycle )
    1884 printk("\n[DBG] %s : thread %x enter for vpn %x / process %x / cycle %d\n",
    1885 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
    1886 #endif
    1887    
    1888     // get reference process cluster and local pointer
    1889     cxy_t       ref_cxy = GET_CXY( process->ref_xp );
    1890     process_t * ref_ptr = GET_PTR( process->ref_xp );
    1891 
    1892     // get new PTE attributes and PPN from reference cluster
    1893     if( local_cxy != ref_cxy )
    1894     {
    1895         rpc_vmm_get_pte_client( ref_cxy,
    1896                                 ref_ptr,
    1897                                 vpn,
    1898                                 true,     // copy-on-write
    1899                                 &attr,
    1900                                 &ppn,
    1901                                 &error );
    1902 
    1903         // get local VMM pointer
    1904         vmm_t * vmm = &process->vmm;
    1905 
    1906         // update local GPT
    1907         error |= hal_gpt_set_pte( &vmm->gpt,
    1908                                   vpn,
    1909                                   attr,
    1910                                   ppn );
    1911     }
    1912     else   // local cluster is the reference cluster
    1913     {
    1914         error = vmm_get_pte( process,
    1915                              vpn,
    1916                              true,      // copy-on-write
    1917                              &attr,
    1918                              &ppn );
    1919     }
    1920 
    1921 #if DEBUG_VMM_GET_PTE
    1922 cycle = (uint32_t)hal_get_cycles();
    1923 if( DEBUG_VMM_GET_PTE < cycle )
    1924 printk("\n[DBG] %s : thread %x exit for vpn %x / process %x / cycle %d\n",
    1925 __FUNCTION__ , CURRENT_THREAD , vpn , process->pid , cycle );
    1926 #endif
    1927 
    1928     return error;
    1929 
    1930 }  // end vmm_handle_cow()
    1931 
    1932 ///////////////////////////////////////////
    1933 error_t vmm_v2p_translate( bool_t    ident,
    1934                            void    * ptr,
    1935                            paddr_t * paddr )
    1936 {
    1937     process_t * process = CURRENT_THREAD->process;
    1938 
    1939     if( ident )  // identity mapping
    1940     {
    1941         *paddr = (paddr_t)PADDR( local_cxy , (lpa_t)ptr );
    1942         return 0;
    1943     }
    1944 
     1837/* deprecated April 2018  [AG]
     1838
     1839error_t vmm_v2p_translate( process_t * process,
     1840                           void      * ptr,
     1841                           paddr_t   * paddr )
     1842{
    19451843    // access page table
    19461844    error_t  error;
     
    19531851    offset = (uint32_t)( ((intptr_t)ptr) & CONFIG_PPM_PAGE_MASK );
    19541852
    1955     if( local_cxy == GET_CXY( process->ref_xp) ) // calling process is reference process
     1853    if( local_cxy == GET_CXY( process->ref_xp) ) // local process is reference process
    19561854    {
    19571855        error = vmm_get_pte( process, vpn , false , &attr , &ppn );
     
    19711869}  // end vmm_v2p_translate()
    19721870
    1973 
     1871*/
  • trunk/kernel/mm/vmm.h

    r437 r440  
    293293
    294294/*********************************************************************************************
    295  * This function allocates physical memory from the local cluster to map all PTEs
    296  * of a "kernel" vseg (type KCODE , KDATA, or KDEV) in the page table of process_zero.
    297  * WARNING : It should not be used for "user" vsegs, that must be mapped using the
    298  * "on-demand-paging" policy.
    299  *********************************************************************************************
    300  * @ vseg     : pointer on the vseg to be mapped.
    301  * @ attr     : GPT attributes to be set for all vseg pages.
    302  * @ returns 0 if success / returns ENOMEM if no memory
    303  ********************************************************************************************/
    304 error_t vmm_map_kernel_vseg( vseg_t           * vseg,
    305                              uint32_t           attr );
    306 
    307 /*********************************************************************************************
    308295 * This function removes a given region (defined by a base address and a size) from
    309296 * the VMM of a given process descriptor. This can modify the number of vsegs:
     
    335322 * @ process   : [in] pointer on process descriptor
    336323 * @ vaddr     : [in] virtual address
    337  * @ vseg      : [out] pointer on found vseg
    338  * @ returns 0 if success / returns -1 if user error.
     324 * @ vseg      : [out] local pointer on local vseg
     325 * @ returns 0 if success / returns -1 if user error (out of segment).
    339326 *********************************************************************************************/
    340327error_t vmm_get_vseg( struct process_s  * process,
     
    343330
    344331/*********************************************************************************************
    345  * This function is called by the generic exception handler when a page-fault event
    346  * has been detected for a given process in a given cluster.
    347  * - If the local cluster is the reference, it call directly the vmm_get_pte() function.
    348  * - If the local cluster is not the reference cluster, it send a RPC_VMM_GET_PTE
    349  *   to the reference cluster to get the missing PTE attributes and PPN,
    350  *   and update the local page table.
    351  *********************************************************************************************
    352  * @ process   : pointer on process descriptor.
    353  * @ vpn       : VPN of the missing PTE.
    354  * @ returns 0 if success / returns ENOMEM if no memory.
     332 * This function is called by the generic exception handler in case of page-fault,
     333 * or copy-on-write event locally detected for a given <vpn> in a given <process>
     334 * as defined by the <is_cow> argument.
     335 * 1) For a Page-Fault:
     336 * - If the local cluster is the reference, or for the STACK and CODE segment types,
     337 *   it call directly the vmm_get_pte() function to access the local VMM.
     338 * - Otherwise, it send a RPC_VMM_GET_PTE to the reference cluster to get the missing
     339 *   PTE attributes and PPN.
     340 * This function check that the missing VPN belongs to a registered vseg, allocates
     341 * a new physical page if required, and updates the local page table.
     342 * 2) For a Copy-On-Write:
     343 * - If no pending fork, it reset the COW flag and set the WRITE flag in the reference
     344 *   GPT entry, and in all the GPT copies.
     345 * - If there is a pending fork, it allocates a new physical page from the cluster defined
     346 *   by the vseg type, copies the old physical page content to the new physical page,
     347 *   and decrements the pending_fork counter in old physical page descriptor.
     348 *********************************************************************************************
     349 * @ process   : pointer on local process descriptor copy.
     350 * @ vpn       : VPN of the missing or faulting PTE.
     351 * @ is_cow    : Copy-On-Write event if true / Page-fault if false.
     352 * @ returns 0 if success / returns ENOMEM if no memory or illegal VPN.
    355353 ********************************************************************************************/
    356354error_t vmm_handle_page_fault( struct process_s * process,
    357                                vpn_t              vpn );
    358 
    359 /*********************************************************************************************
    360  * This function is called by the generic exception handler when a copy-on-write event
    361  * has been detected for a given process in a given cluster.
    362  * It takes the lock protecting the physical page, and test the pending forks counter.
    363  * If no pending fork:
    364  * - it reset the COW flag and set the WRITE flag in the reference GPT entry, and in all
    365  *   the GPT copies
    366 
    367  * If there is a pending forkon the
    368  * - It get the involved vseg pointer.
    369  * - It allocates a new physical page from the cluster defined by the vseg type.
    370  * - It copies the old physical page content to the new physical page.
    371  * - It decrements the pending_fork counter in old physical page descriptor.
    372 
    373  *********************************************************************************************
    374  * @ process   : pointer on process descriptor.
    375  * @ vpn       : VPN of the missing PTE.
    376  * @ returns 0 if success / returns ENOMEM if no memory.
    377  ********************************************************************************************/
    378 error_t vmm_handle_cow( struct process_s * process,
    379                         vpn_t              vpn );
    380 
    381 /*********************************************************************************************
    382  * This function handle both the "page-fault" and "copy-on_write" events for a given <vpn>
    383  * in a given <process>.  The <cow> argument defines the type of event to be handled.
    384  * This function must be called by a thread running in reference cluster, and the vseg
    385  * containing the searched VPN must be registered in the reference VMM.
     355                               vpn_t              vpn,
     356                               bool_t             is_cow );
     357
     358/*********************************************************************************************
     359 * This function is called by the vmm_handle_page_fault() to handle both the "page-fault",
     360 * and the "copy-on_write" events for a given <vpn> in a given <process>, as defined
     361 * by the <is_cow> argument.
     362 * The vseg containing the searched VPN must be registered in the reference VMM.
    386363 * - for an page-fault, it allocates the missing physical page from the target cluster
    387364 *   defined by the vseg type, initializes it, and updates the reference GPT, but not
     
    390367 *   initialise it from the old physical page, and updates the reference GPT and all
    391368 *   the GPT copies, for coherence.
    392  * In both cases, it calls the RPC_PMEM_GET_PAGES to get the new physical page when
    393  * the target cluster is not the reference cluster.
     369 * It calls the RPC_PMEM_GET_PAGES to get the new physical page when the target cluster
     370 * is not the local cluster,
    394371 * It returns in the <attr> and <ppn> arguments the accessed or modified PTE.
    395372 *********************************************************************************************
    396373 * @ process   : [in] pointer on process descriptor.
    397374 * @ vpn       : [in] VPN defining the missing PTE.
    398  * @ cow       : [in] "copy_on_write" if true / "page_fault" if false.
     375 * @ is_cow    : [in] "copy_on_write" if true / "page_fault" if false.
    399376 * @ attr      : [out] PTE attributes.
    400377 * @ ppn       : [out] PTE ppn.
     
    403380error_t vmm_get_pte( struct process_s * process,
    404381                     vpn_t              vpn,
    405                      bool_t             cow,
     382                     bool_t             is_cow,
    406383                     uint32_t         * attr,
    407384                     ppn_t            * ppn );
     
    428405                         ppn_t  * ppn );
    429406
    430 /*********************************************************************************************
    431  * This function makes the virtual to physical address translation, using the calling
    432  * process page table. It uses identity mapping if required by the <ident> argument.
    433  * This address translation is required to configure the peripherals having a DMA
    434  * capability, or to implement the software L2/L3 cache cohérence, using the MMC device
    435  * synchronisation primitives.
    436  * WARNING : the <ident> value must be defined by the CONFIG_KERNEL_IDENTITY_MAP parameter.
    437  *********************************************************************************************
    438  * @ ident     : [in] uses identity mapping if true.
    439  * @ ptr       : [in] virtual address.
    440  * @ paddr     : [out] pointer on buffer for physical address.
    441  * @ returns 0 if success / returns ENOMEM if error.
    442  ********************************************************************************************/
    443 error_t vmm_v2p_translate( bool_t    ident,
    444                            void    * ptr,
    445                            paddr_t * paddr );
    446 
    447 
    448407
    449408#endif /* _VMM_H_ */
  • trunk/kernel/mm/vseg.c

    r429 r440  
    143143                      VSEG_CACHE   ;
    144144    }
    145     else if( type == VSEG_TYPE_KCODE )
    146     {
    147         vseg->flags = VSEG_EXEC    |
    148                       VSEG_CACHE   |
    149                       VSEG_PRIVATE ;
    150     }
    151     else if( type == VSEG_TYPE_KDATA )
    152     {
    153         vseg->flags = VSEG_WRITE   |
    154                       VSEG_CACHE   |
    155                       VSEG_PRIVATE ;
    156     }
    157     else if( type == VSEG_TYPE_KDEV )
    158     {
    159         vseg->flags = VSEG_WRITE   ;
    160     }
    161145    else
    162146    {
  • trunk/kernel/mm/vseg.h

    r409 r440  
    4747    VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized       */
    4848    VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized       */
    49 
    50     VSEG_TYPE_KDATA  = 10,
    51     VSEG_TYPE_KCODE  = 11,
    52     VSEG_TYPE_KDEV   = 12,
    5349}
    5450vseg_type_t;
Note: See TracChangeset for help on using the changeset viewer.