Changeset 623 for trunk/kernel/mm


Ignore:
Timestamp: Mar 6, 2019, 4:37:15 PM (6 years ago)
Author: alain
Message:

Introduce three new types of vsegs (KCODE, KDATA, KDEV)
to map the kernel vsegs in the process VSL and GPT.
This is now used by both the TSAR and the I86 architectures.

Location: trunk/kernel/mm
Files: 8 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/mapper.c

    r614 r623  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018)
     5 *           Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    261261vfs_inode_t * inode = mapper->inode;
    262262vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    263 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    264 // if( (page_id == 1) && (cycle > 10000000) )
     263if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    265264printk("\n[%s] enter for page %d in <%s> / cycle %d",
    266265__FUNCTION__, page_id, name, cycle );
     
    322321#if DEBUG_MAPPER_HANDLE_MISS
    323322cycle = (uint32_t)hal_get_cycles();
    324 // if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    325 // if( (page_id == 1) && (cycle > 10000000) )
     323if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    326324printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
    327325__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
     
    442440            ppm_page_do_dirty( page_xp );
    443441            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
     442
     443putb(" in mapper_move_user()" , map_ptr , page_count );
     444
    444445        }
    445446
     
    645646
    646647}  // end mapper_remote_set_32()
     648
     649/////////////////////////////////////////
     650error_t mapper_sync( mapper_t *  mapper )
     651{
     652    page_t   * page;                // local pointer on current page descriptor
     653    xptr_t     page_xp;             // extended pointer on current page descriptor
     654    grdxt_t  * rt;                  // pointer on radix_tree descriptor
     655    uint32_t   start_key;           // start page index in mapper
     656    uint32_t   found_key;           // current page index in mapper
     657    error_t    error;
     658
     659#if DEBUG_MAPPER_SYNC
     660thread_t * this  = CURRENT_THREAD;
     661uint32_t   cycle = (uint32_t)hal_get_cycles();
     662char       name[CONFIG_VFS_MAX_NAME_LENGTH];
     663vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
     664#endif
     665
     666    // get pointer on radix tree
     667    rt        = &mapper->rt;
     668
     669    // initialise loop variable
     670    start_key = 0;
     671
     672    // scan radix-tree until last page found
     673    while( 1 )
     674    {
     675        // get page descriptor from radix tree
     676        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
     677         
     678        if( page == NULL ) break;
     679
     680assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
     681assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );
     682
     683        // build extended pointer on page descriptor
     684        page_xp = XPTR( local_cxy , page );
     685
     686        // synchronize page if dirty
     687        if( (page->flags & PG_DIRTY) != 0 )
     688        {
     689
     690#if DEBUG_MAPPER_SYNC
     691if( cycle > DEBUG_MAPPER_SYNC )
     692printk("\n[%s] thread[%x,%x] synchonise page %d of <%s> to device\n",
     693__FUNCTION__, this->process->pid, this->trdid, page->index, name );
     694#endif
     695            // copy page to file system
     696            error = vfs_fs_move_page( page_xp , IOC_WRITE );
     697
     698            if( error )
     699            {
     700                printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n",
     701                __FUNCTION__, page->index );
     702                return -1;
     703            }
     704
     705            // remove page from PPM dirty list
     706            ppm_page_undo_dirty( page_xp );
     707        }
     708        else
     709        {
     710
     711#if DEBUG_MAPPER_SYNC
     712if( cycle > DEBUG_MAPPER_SYNC )
     713printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
     714__FUNCTION__, this->process->pid, this->trdid, page->index, name );
     715#endif
     716        }
     717
     718        // update loop variable
     719        start_key = page->index + 1;
     720    }  // end while
     721
     722    return 0;
     723
     724}  // end mapper_sync()
    647725
    648726//////////////////////////////////////////////////
  • trunk/kernel/mm/mapper.h

    r614 r623  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018)
     5 *           Alain Greiner (2016,2017,2018,2019)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    4848 *   "readers", and only one "writer".
    4949 * - A "reader" thread, calling the mapper_remote_get_page() function to get a page
    50  *   descriptor pointer from the page index in file, can be remote (running in any cluster).
     50 *   descriptor pointer from the page index in file, can be running in any cluster.
    5151 * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss
    5252 *   must be local (running in the mapper cluster).
    53  * - The vfs_mapper_move_page() function access the file system to handle a mapper miss,
     53 * - The vfs_fs_move_page() function access the file system to handle a mapper miss,
    5454 *   or update a dirty page on device.
    5555 * - The vfs_mapper_load_all() functions is used to load all pages of a directory
     
    6363 *
    6464 * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c
    65  *        and mapper.h file should be trandfered to the vfs directory.
     65 *        and mapper.h file should be trandfered to the fs directory.
    6666 ******************************************************************************************/
    6767
     
    230230
    231231/*******************************************************************************************
     232 * This scans all pages present in the mapper identified by the <mapper> argument,
     233 * and synchronize all pages maked as dirty" on disk.
     234 * These pages are unmarked and removed from the local PPM dirty_list.
     235 * This function must be called by a local thread running in same cluster as the mapper.
     236 * A remote thread must call the RPC_MAPPER_SYNC function.
     237 *******************************************************************************************
     238 * @ mapper     : [in]  local pointer on local mapper.
     239 * @ returns 0 if success / return -1 if error.
     240 ******************************************************************************************/
     241error_t mapper_sync( mapper_t *  mapper );
     242
     243/*******************************************************************************************
    232244 * This debug function displays the content of a given page of a given mapper.
    233245 * - the mapper is identified by the <mapper_xp> argument.
  • trunk/kernel/mm/page.h

    r612 r623  
    4141#define PG_INIT             0x0001     // page descriptor has been initialised
    4242#define PG_RESERVED         0x0002     // cannot be allocated by PPM
    43 #define PG_FREE             0x0004     // page can be allocated by PPM
     43#define PG_FREE             0x0004     // page not yet allocated by PPM
    4444#define PG_DIRTY            0x0040     // page has been written
    4545#define PG_COW          0x0080     // page is copy-on-write
  • trunk/kernel/mm/ppm.h

    r611 r623  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017,2018)
     5 *          Alain Greiner    (2016,2017,2018,2019)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    3737 * This structure defines the Physical Pages Manager in a cluster.
    3838 * In each cluster, the physical memory bank starts at local physical address 0 and
    39  * contains an integer number of pages, defined by the <pages_nr> field in the
     39 * contains an integer number of small pages, defined by the <pages_nr> field in the
    4040 * boot_info structure. It is split in three parts:
    4141 *
    4242 * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
    43  *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
    44  *   boot_info structure.
    45  * - the "pages_tbl" section contains the physical page descriptors array. It starts
    46  *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
     43 *   It starts at local PPN = 0 and the size is defined by the <pages_offset> field
     44 *   in the boot_info structure.
     45 * - the local "pages_tbl" section contains the physical page descriptors array.
     46 *   It starts at local PPN = pages_offset, and it contains one entry per small page.
    4747 *   It is created and initialized by the hal_ppm_create() function.
    4848 * - The "kernel_heap" section contains all physical pages that are are not in the
    49  *   kernel_code and pages_tbl sections, and that have not been reserved by the
    50  *   architecture specific bootloader. The reserved pages are defined in the boot_info
    51  *   structure.
     49 *   "kernel_code" and "pages_tbl" sections, and that have not been reserved.
     50 *   The reserved pages are defined in the boot_info structure.
    5251 *
    5352 * The main service provided by the PMM is the dynamic allocation of physical pages
     
    6059 *
    6160 * Another service is to register the dirty pages in a specific dirty_list, that is
    62  * also rooted in the PPM, in order to be able to save all dirty pages on disk.
     61 * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk.
    6362 * This dirty list is protected by a specific remote_queuelock, because it can be
    6463 * modified by a remote thread, but it contains only local pages.
     
    198197 *   . if page already dirty => do nothing
    199198 *   . it page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
    200  * - it releases the busylock protcting the page flags.
     199 * - it releases the busylock protecting the page flags.
    201200 * - it releases the queuelock protecting the PPM dirty_list.
    202201 *****************************************************************************************
     
    214213 *   . if page not dirty => do nothing
    215214 *   . it page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
    216  * - it releases the busylock protcting the page flags.
     215 * - it releases the busylock protecting the page flags.
    217216 * - it releases the queuelock protecting the PPM dirty_list.
    218217 *****************************************************************************************
  • trunk/kernel/mm/vmm.c

    r621 r623  
    5959{
    6060    error_t   error;
    61     vseg_t  * vseg_kentry;
    6261    vseg_t  * vseg_args;
    6362    vseg_t  * vseg_envs;
     
    9190(CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
    9291"STACK zone too small\n");
    93 
    94     // register kentry vseg in VSL
    95     base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
    96     size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
    97 
    98     vseg_kentry = vmm_create_vseg( process,
    99                                    VSEG_TYPE_CODE,
    100                                    base,
    101                                    size,
    102                                    0,             // file_offset unused
    103                                    0,             // file_size unused
    104                                    XPTR_NULL,     // mapper_xp unused
    105                                    local_cxy );
    106 
    107     if( vseg_kentry == NULL )
    108     {
    109         printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );
    110         return -1;
    111     }
    112 
    113     vmm->kent_vpn_base = base;
    11492
    11593    // register args vseg in VSL
     
    162140
    163141    if( error )
    164     printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     142    {
     143        printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     144        return -1;
     145    }
    165146
    166147    // initialize GPT lock
    167148    remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );
    168149
    169     // architecture specic GPT initialisation
    170     // (For TSAR, identity map the kentry_vseg)
    171     error = hal_vmm_init( vmm );
    172 
    173     if( error )
    174     printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ );
     150    // update process VMM with kernel vsegs
     151    error = hal_vmm_kernel_update( process );
     152
     153    if( error )
     154    {
     155        printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
     156        return -1;
     157    }
    175158
    176159    // initialize STACK allocator
     
    326309    }
    327310
    328     // release physical memory allocated for vseg descriptor if no MMAP type
    329     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
     311    // release physical memory allocated for vseg if no MMAP and no kernel type
     312    if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
     313        (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    330314    {
    331315        vseg_free( vseg );
     
    606590    child_vmm->vsegs_nr = 0;
    607591
    608     // create child GPT
     592    // create the child GPT
    609593    error = hal_gpt_create( &child_vmm->gpt );
    610594
     
    639623#endif
    640624
    641         // all parent vsegs - but STACK - must be copied in child VSL
    642         if( type != VSEG_TYPE_STACK )
     625        // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
     626        if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
     627            (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
    643628        {
    644629            // allocate memory for a new child vseg
     
    726711    remote_rwlock_rd_release( parent_lock_xp );
    727712
    728     // initialize child GPT (architecture specic)
    729     // => For TSAR, identity map the kentry_vseg
    730     error = hal_vmm_init( child_vmm );
     713    // update child VMM with kernel vsegs
     714    error = hal_vmm_kernel_update( child_process );
    731715
    732716    if( error )
    733717    {
    734         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
     718        printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );
    735719        return -1;
    736720    }
     
    10981082        base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
    10991083    }
    1100     else    // VSEG_TYPE_DATA or VSEG_TYPE_CODE
     1084    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    11011085    {
    11021086        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
     
    11781162    xptr_t      lock_xp;    // extended pointer on lock protecting forks counter
    11791163    uint32_t    forks;      // actual number of pendinf forks
     1164    uint32_t    type;       // vseg type
    11801165
    11811166#if DEBUG_VMM_DELETE_VSEG
     
    11901175    process = cluster_get_local_process_from_pid( pid );
    11911176
    1192     if( process == NULL ) return;
     1177    if( process == NULL )
     1178    {
     1179        printk("\n[ERRORR] in %s : cannot get local process descriptor\n",
     1180        __FUNCTION__ );
     1181        return;
     1182    }
    11931183
    11941184    // get pointers on local process VMM an GPT
     
    11991189    vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    12001190
    1201     if( vseg == NULL ) return;
    1202 
    1203     // loop to invalidate all vseg PTEs in GPT
     1191    if( vseg == NULL )
     1192    {
     1193        printk("\n[ERRORR] in %s : cannot get vseg descriptor\n",
     1194        __FUNCTION__ );
     1195        return;
     1196    }
     1197
     1198    // get relevant vseg infos
     1199    type    = vseg->type;
    12041200    vpn_min = vseg->vpn_base;
    12051201    vpn_max = vpn_min + vseg->vpn_size;
     1202
     1203    // loop to invalidate all vseg PTEs in GPT
    12061204        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    12071205    {
     
    12161214printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
    12171215#endif
    1218 
    1219 // check small page
    1220 assert( (attr & GPT_SMALL) , "an user vseg must use small pages" );
    1221 
    12221216            // unmap GPT entry in local GPT
    12231217            hal_gpt_reset_pte( gpt , vpn );
    12241218
    1225             // handle pending forks counter if
    1226             // 1) not identity mapped
    1227             // 2) reference cluster
    1228             if( ((vseg->flags & VSEG_IDENT)  == 0) &&
    1229                 (GET_CXY( process->ref_xp ) == local_cxy) )
     1219            // the allocated page is not released to KMEM for kernel vseg
     1220            if( (type != VSEG_TYPE_KCODE) &&
     1221                (type != VSEG_TYPE_KDATA) &&
     1222                (type != VSEG_TYPE_KDEV ) )
    12301223            {
     1224
     1225// FIXME This code must be completely re-written, as the actual release must depend on
     1226// - the vseg type
     1227// - the reference cluster
     1228// - the page refcount and/or the forks counter
     1229
    12311230                // get extended pointer on physical page descriptor
    12321231                page_xp  = ppm_ppn2page( ppn );
     
    12381237                lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    12391238
     1239                // get the lock protecting the page
    12401240                remote_busylock_acquire( lock_xp );
     1241
    12411242                // get pending forks counter
    12421243                forks = hal_remote_l32( forks_xp );
     1244
    12431245                if( forks )  // decrement pending forks counter
    12441246                {
     
    12631265#endif
    12641266                }
     1267
     1268                // release the lock protecting the page
    12651269                remote_busylock_release( lock_xp );
    12661270            }
     
    13111315    // return failure
    13121316    remote_rwlock_rd_release( lock_xp );
     1317
    13131318    return NULL;
    13141319
     
    13251330    vpn_t     vpn_max;
    13261331
     1332#if DEBUG_VMM_RESIZE_VSEG
     1333uint32_t   cycle = (uint32_t)hal_get_cycles();
     1334thread_t * this  = CURRENT_THREAD;
     1335if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1336printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
     1337__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
     1338#endif
     1339
    13271340    // get pointer on process VMM
    13281341    vmm_t * vmm = &process->vmm;
     
    13341347        vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
    13351348
    1336         if( vseg == NULL)  return EINVAL;
    1337 
    1338     // get extended pointer on VSL lock
    1339     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    1340 
    1341     // get lock protecting VSL
    1342         remote_rwlock_wr_acquire( lock_xp );
    1343 
     1349        if( vseg == NULL)
     1350    {
     1351        printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
     1352        __FUNCTION__, base , size );
     1353        return -1;
     1354    }
     1355
     1356    // resize depends on unmapped region base and size
    13441357        if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
    13451358    {
     1359        printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
     1360        __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1361
    13461362        error = -1;
    13471363    }
    13481364        else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
    13491365    {
     1366
     1367#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1368if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1369printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
     1370__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1371#endif
    13501372        vmm_delete_vseg( process->pid , vseg->min );
     1373
     1374#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1375if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1376printk("\n[%s] thread[%x,%x] deleted vseg\n",
     1377__FUNCTION__, this->process->pid, this->trdid );
     1378#endif
    13511379        error = 0;
    13521380    }
    13531381        else if( vseg->min == addr_min )                               // vseg must be resized
    13541382    {
    1355         // update vseg base address
     1383
     1384#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1385if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1386printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1387__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1388#endif
     1389        // update vseg min address
    13561390        vseg->min = addr_max;
    13571391
     
    13611395        vseg->vpn_base = vpn_min;
    13621396        vseg->vpn_size = vpn_max - vpn_min + 1;
     1397
     1398#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1399if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1400printk("\n[%s] thread[%x,%x] changed vseg_min\n",
     1401__FUNCTION__, this->process->pid, this->trdid );
     1402#endif
    13631403        error = 0;
    13641404    }
    13651405        else if( vseg->max == addr_max )                              // vseg must be resized
    13661406    {
     1407
     1408#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1409if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1410printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1411__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1412#endif
    13671413        // update vseg max address
    13681414        vseg->max = addr_min;
     
    13731419        vseg->vpn_base = vpn_min;
    13741420        vseg->vpn_size = vpn_max - vpn_min + 1;
     1421
     1422#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1423if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1424printk("\n[%s] thread[%x,%x] changed vseg_max\n",
     1425__FUNCTION__, this->process->pid, this->trdid );
     1426#endif
    13751427        error = 0;
     1428
    13761429    }
    13771430    else                                                          // vseg cut in three regions
    13781431    {
     1432
     1433#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1434if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1435printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
     1436__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
     1437#endif
    13791438        // resize existing vseg
    13801439        vseg->max = addr_min;
     
    13961455                               vseg->cxy );
    13971456
    1398         if( new == NULL ) error = EINVAL;
     1457#if( DEBUG_VMM_RESIZE_VSEG & 1 )
     1458if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1459printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n",
     1460__FUNCTION__, this->process->pid, this->trdid );
     1461#endif
     1462
     1463        if( new == NULL ) error = -1;
    13991464        else              error = 0;
    14001465    }
    14011466
    1402     // release VMM lock
    1403         remote_rwlock_wr_release( lock_xp );
     1467#if DEBUG_VMM_RESIZE_VSEG
     1468if( DEBUG_VMM_RESIZE_VSEG < cycle )
     1469printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
     1470__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
     1471#endif
    14041472
    14051473        return error;
  • trunk/kernel/mm/vmm.h

    r614 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/vseg.c

    r595 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2018,2019)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    5555        else if( vseg_type == VSEG_TYPE_FILE   ) return "FILE";
    5656        else if( vseg_type == VSEG_TYPE_REMOTE ) return "REMO";
     57        else if( vseg_type == VSEG_TYPE_KCODE  ) return "KCOD";
     58        else if( vseg_type == VSEG_TYPE_KDATA  ) return "KDAT";
     59        else if( vseg_type == VSEG_TYPE_KDEV   ) return "KDEV";
    5760    else                                     return "undefined";
    5861}
     
    142145                      VSEG_CACHE   ;
    143146    }
     147    else if( type == VSEG_TYPE_KCODE )
     148    {
     149        vseg->flags = VSEG_EXEC    |
     150                      VSEG_CACHE   |
     151                      VSEG_PRIVATE ;
     152    }
     153    else if( type == VSEG_TYPE_KDATA )
     154    {
     155        vseg->flags = VSEG_CACHE   |
     156                      VSEG_WRITE   ;
     157    }
     158    else if( type == VSEG_TYPE_KDEV )
     159    {
     160        vseg->flags = VSEG_WRITE   ;
     161    }
    144162    else
    145163    {
     
    158176
    159177    // initialize vseg with remote_read access
    160     vseg->type        =           hal_remote_l32 ( XPTR( cxy , &ptr->type        ) );
     178    vseg->type        =           hal_remote_l32( XPTR( cxy , &ptr->type        ) );
    161179    vseg->min         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->min         ) );
    162180    vseg->max         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->max         ) );
    163     vseg->vpn_base    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_base    ) );
    164     vseg->vpn_size    =           hal_remote_l32 ( XPTR( cxy , &ptr->vpn_size    ) );
    165     vseg->flags       =           hal_remote_l32 ( XPTR( cxy , &ptr->flags       ) );
    166     vseg->file_offset =           hal_remote_l32 ( XPTR( cxy , &ptr->file_offset ) );
    167     vseg->file_size   =           hal_remote_l32 ( XPTR( cxy , &ptr->file_size   ) );
     181    vseg->vpn_base    =           hal_remote_l32( XPTR( cxy , &ptr->vpn_base    ) );
     182    vseg->vpn_size    =           hal_remote_l32( XPTR( cxy , &ptr->vpn_size    ) );
     183    vseg->flags       =           hal_remote_l32( XPTR( cxy , &ptr->flags       ) );
     184    vseg->file_offset =           hal_remote_l32( XPTR( cxy , &ptr->file_offset ) );
     185    vseg->file_size   =           hal_remote_l32( XPTR( cxy , &ptr->file_size   ) );
    168186        vseg->mapper_xp   = (xptr_t)  hal_remote_l64( XPTR( cxy , &ptr->mapper_xp   ) );
    169187
    170188    switch (vseg->type)
    171189    {
    172         case VSEG_TYPE_DATA:
     190        case VSEG_TYPE_DATA:      // unused
    173191        {
    174192            vseg->cxy = 0xffff;
    175193            break;
    176194        }
    177         case VSEG_TYPE_CODE:
     195        case VSEG_TYPE_CODE:      // always local
    178196        case VSEG_TYPE_STACK:
     197        case VSEG_TYPE_KCODE:
    179198        {
    180199            vseg->cxy = local_cxy;
    181200            break;
    182201        }
    183         case VSEG_TYPE_ANON:
     202        case VSEG_TYPE_ANON:      // intrinsic
    184203        case VSEG_TYPE_FILE:
    185204        case VSEG_TYPE_REMOTE:
     205        case VSEG_TYPE_KDEV:
     206        case VSEG_TYPE_KDATA:
    186207        {
    187208            vseg->cxy = (cxy_t) hal_remote_l32( XPTR(cxy, &ptr->cxy) );
  • trunk/kernel/mm/vseg.h

    r611 r623  
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    55 *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016)
     6 *           Alain Greiner (2016,2017,2018,2019)
    77 *
    88 * Copyright (c) UPMC Sorbonne Universites
     
    3535
    3636/*******************************************************************************************
    37  * This enum defines the vseg types for an user process.
     37 * This enum defines the vseg types.
     38 * Note : the KDATA and KDEV types are not used by the TSAR HAL, because the accesses
     39 *        to kernel data or kernel devices are done through the DATA extension address
     40 *        register, but these types are probably required by the I86 HAL [AG].
    3841 ******************************************************************************************/
    3942
    4043typedef enum
    4144{
    42     VSEG_TYPE_CODE   = 0,          /*! executable user code   / private / localized       */
    43     VSEG_TYPE_DATA   = 1,          /*! initialized user data  / public  / distributed     */
    44     VSEG_TYPE_STACK  = 2,          /*! execution user stack   / private / localized       */
    45     VSEG_TYPE_ANON   = 3,          /*! anonymous mmap         / public  / localized       */
    46     VSEG_TYPE_FILE   = 4,          /*! file mmap              / public  / localized       */
    47     VSEG_TYPE_REMOTE = 5,          /*! remote mmap            / public  / localized       */
     45    VSEG_TYPE_CODE   = 0,          /*! executable user code     / private / localized     */
     46    VSEG_TYPE_DATA   = 1,          /*! initialized user data    / public  / distributed   */
     47    VSEG_TYPE_STACK  = 2,          /*! execution user stack     / private / localized     */
     48    VSEG_TYPE_ANON   = 3,          /*! anonymous mmap           / public  / localized     */
     49    VSEG_TYPE_FILE   = 4,          /*! file mmap                / public  / localized     */
     50    VSEG_TYPE_REMOTE = 5,          /*! remote mmap              / public  / localized     */
     51
     52    VSEG_TYPE_KCODE  = 6,          /*! executable kernel code   / private / localized     */
     53    VSEG_TYPE_KDATA  = 7,          /*! initialized kernel data  / private / localized     */
     54    VSEG_TYPE_KDEV   = 8,          /*! kernel peripheral device / public  / localized     */
    4855}
    4956vseg_type_t;
     
    6067#define VSEG_PRIVATE  0x0010       /*! should not be accessed from another cluster        */
    6168#define VSEG_DISTRIB  0x0020       /*! physically distributed on all clusters             */
    62 #define VSEG_IDENT    0x0040       /*! identity mapping                                   */
    6369
    6470/*******************************************************************************************
Note: See TracChangeset for help on using the changeset viewer.