Changeset 656 for trunk/kernel/mm


Ignore:
Timestamp:
Dec 6, 2019, 12:07:51 PM (5 years ago)
Author:
alain
Message:

Fix several bugs in the FATFS and in the VFS,
related to the creation of big files requiring
more than 4 Kbytes (one cluster) on device.

Location:
trunk/kernel/mm
Files:
10 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/mm/kcm.c

    r635 r656  
    509509        {
    510510        // get one 4 Kbytes page from remote PPM
    511         page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 );
    512 
    513             if( page == NULL )
     511        xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 );
     512
     513            if( page_xp == XPTR_NULL )
    514514            {
    515515                    printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     
    519519        }
    520520
    521             // get remote page base address
    522             xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) );
     521            // get extended pointer on allocated buffer
     522            xptr_t base_xp = ppm_page2base( page_xp );
    523523
    524524        // get local pointer on kcm_page
     
    529529            hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 );
    530530            hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm )    , kcm_ptr );
    531             hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , page );
     531            hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , GET_PTR( page_xp ) );
    532532
    533533            // introduce new page in remote KCM active_list
  • trunk/kernel/mm/kmem.c

    r635 r656  
    4545        flags = req->flags;
    4646
    47     ////////////////////////////////// PPM
     47    //////////////////////
    4848        if( type == KMEM_PPM )
    4949        {
     
    7676        return ptr;
    7777        }
    78     ///////////////////////////////////// KCM
     78    ///////////////////////////
    7979        else if( type == KMEM_KCM )
    8080        {
     
    102102        return ptr;
    103103        }
    104     //////////////////////////////////// KHM
     104    ///////////////////////////
    105105        else if( type == KMEM_KHM )
    106106        {
     
    140140    uint32_t type = req->type;
    141141
     142    //////////////////////
    142143        if( type == KMEM_PPM )
    143144        {
     
    146147        ppm_free_pages( page );
    147148    }
     149    ///////////////////////////
    148150    else if( type == KMEM_KCM )
    149151    {
    150152        kcm_free( req->ptr );
    151153        }
     154    ///////////////////////////
    152155    else if( type == KMEM_KHM )
    153156    {
     
    172175        flags = req->flags;
    173176
    174         ///////////////////////////////// PPM
    175         if( type == KMEM_PPM )
    176         {
    177                 // allocate the number of requested pages
    178                 page_t * page_ptr = ppm_remote_alloc_pages( cxy , order );
    179 
    180                 if( page_ptr == NULL )
     177        //////////////////////
     178        if( type == KMEM_PPM )
     179        {
     180                // allocate the number of requested pages from remote cluster
     181                xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );
     182
     183                if( page_xp == XPTR_NULL )
    181184                {
    182185                        printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n",
     
    185188                }
    186189
    187         xptr_t page_xp = XPTR( cxy , page_ptr );
    188 
    189         // get pointer on buffer from the page descriptor
     190        // get extended pointer on remote buffer
    190191        xptr_t base_xp = ppm_page2base( page_xp );
    191192
     
    193194                if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
    194195
    195         void * ptr = GET_PTR( base_xp );
    196196
    197197#if DEBUG_KMEM_REMOTE
     
    201201printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
    202202__FUNCTION__, this->process->pid, this->trdid,
    203 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), cxy, cycle );
    204 #endif
    205         return ptr;
    206         }
    207     /////////////////////////////////// KCM
     2031<<order, ppm_page2ppn( page_xp ), cxy, cycle );
     204#endif
     205        return GET_PTR( base_xp );
     206        }
     207    ///////////////////////////
    208208        else if( type == KMEM_KCM )
    209209        {
     
    231231        return ptr;
    232232        }
    233         /////////////////////////////////// KHM
     233        ///////////////////////////
    234234        else if( type == KMEM_KHM )               
    235235        {
     
    250250    uint32_t type = req->type;
    251251
     252    //////////////////////
    252253        if( type == KMEM_PPM )
    253254        {
     
    256257        ppm_remote_free_pages( cxy , page );
    257258    }
     259    ///////////////////////////
    258260    else if( type == KMEM_KCM )
    259261    {
    260262        kcm_remote_free( cxy , req->ptr );
    261263        }
     264    ///////////////////////////
    262265    else if( type == KMEM_KHM )
    263266    {
  • trunk/kernel/mm/kmem.h

    r635 r656  
    2929
    3030/*************************************************************************************
    31  * This enum defines the three Kernel Memory Allocator types:
     31 * This enum defines the three Kernel Memory Allocator types
    3232 ************************************************************************************/
    3333
     
    7171 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes,
    7272 *       M can have any value, and req.order = M.
     73 *
     74 * WARNING: the physical memory allocated with a given allocator type must be
     75 *          released using the same allocator type.
    7376 *************************************************************************************
    7477 * @ cxy   : target cluster identifier for a remote access.
  • trunk/kernel/mm/mapper.c

    r651 r656  
    2727#include <hal_special.h>
    2828#include <hal_uspace.h>
     29#include <hal_vmm.h>
    2930#include <grdxt.h>
    3031#include <string.h>
     
    141142    error_t    error;
    142143
     144    uint32_t   inode_size;   
     145    uint32_t   inode_type;
     146
    143147    thread_t * this = CURRENT_THREAD;
    144148
    145149    // get target mapper cluster and local pointer
    146     cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    147     mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     150    cxy_t         mapper_cxy = GET_CXY( mapper_xp );
     151    mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
     152
     153    // get inode pointer
     154    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     155
     156    // get inode size and type if relevant
     157    if( inode != NULL )
     158    {
     159        inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
     160        inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
     161    }
     162    else
     163    {
     164        inode_size = 0;
     165        inode_type = 0;
     166    }
    148167
    149168#if DEBUG_MAPPER_HANDLE_MISS
    150169uint32_t      cycle = (uint32_t)hal_get_cycles();
    151170char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    152 vfs_inode_t * inode = mapper->inode;
    153171if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
    154172{
    155     vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    156     printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cluster %x / cycle %d",
     173    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
     174    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cxy %x / cycle %d\n",
    157175    __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle );
    158     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
    159176}
    160177if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
    161178{
    162     printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cluster %x / cycle %d",
     179    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cxy %x / cycle %d\n",
    163180    __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle );
    164     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
     181}
     182#endif
     183
     184#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     185if( DEBUG_MAPPER_HANDLE_MISS < cycle )
     186{
     187    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
     188    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
    165189}
    166190#endif
    167191
    168192    // allocate one 4 Kbytes page from the remote mapper cluster
    169     page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 );
     193    xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
     194    page_t * page_ptr = GET_PTR( page_xp );
    170195                           
    171     if( page_ptr == NULL )
     196    if( page_xp == XPTR_NULL )
    172197    {
    173198        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
     
    176201    }
    177202
    178     // build extended pointer on new page descriptor
    179     xptr_t page_xp = XPTR( mapper_cxy , page_ptr );
    180 
    181203    // initialize the page descriptor
    182204    page_remote_init( page_xp );
    183205
     206    // initialize specific page descriptor fields
    184207    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1          );
    185208    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index )    , page_id    );
     
    200223    }
    201224
    202     // launch I/O operation to load page from IOC device to mapper
    203     error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
    204 
    205     if( error )
    206     {
    207         printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
    208         __FUNCTION__ , this->process->pid, this->trdid );
    209         mapper_remote_release_page( mapper_xp , page_ptr );
    210         return -1;
     225    // launch I/O operation to load page from IOC device when required:
     226    // - it is the FAT mapper
     227    // - it is a directory mapper
     228    // - it is a file mapper, and data exists on the IOC device for this page
     229    if( (inode == NULL) || (inode_type == INODE_TYPE_DIR) || (inode_size > (page_id << 10) ) )
     230    {
     231        error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
     232
     233        if( error )
     234        {
     235            printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
     236            __FUNCTION__ , this->process->pid, this->trdid );
     237            mapper_remote_release_page( mapper_xp , page_ptr );
     238            return -1;
     239         }
    211240    }
    212241
     
    215244
    216245#if DEBUG_MAPPER_HANDLE_MISS
    217 cycle = (uint32_t)hal_get_cycles();
     246ppn_t ppn = ppm_page2ppn( page_xp );
    218247if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
    219248{
    220     printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
    221     __FUNCTION__, this->process->pid, this->trdid,
    222     page_id, name, ppm_page2ppn( page_xp ), cycle );
    223     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
     249    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / page %x / ppn %x\n",
     250    __FUNCTION__, this->process->pid, this->trdid, page_id, name, page_ptr, ppn );
    224251}
    225252if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
    226253{
    227     printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
    228     __FUNCTION__, this->process->pid, this->trdid,
    229     page_id, ppm_page2ppn( page_xp ), cycle );
    230     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
     254    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / page %x / ppn %x\n",
     255    __FUNCTION__, this->process->pid, this->trdid, page_id, page_ptr, ppn );
     256}
     257#endif
     258
     259#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     260if( DEBUG_MAPPER_HANDLE_MISS < cycle )
     261{
     262    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
     263    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
    231264}
    232265#endif
     
    241274{
    242275    error_t       error;
    243     mapper_t    * mapper_ptr;
    244     cxy_t         mapper_cxy;
    245     xptr_t        lock_xp;        // extended pointer on mapper lock
    246     xptr_t        page_xp;        // extended pointer on searched page descriptor
    247     xptr_t        rt_xp;          // extended pointer on radix tree in mapper
    248276
    249277    thread_t * this = CURRENT_THREAD;
    250278
    251279    // get mapper cluster and local pointer
    252     mapper_ptr = GET_PTR( mapper_xp );
    253     mapper_cxy = GET_CXY( mapper_xp );
     280    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     281    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    254282
    255283#if DEBUG_MAPPER_GET_PAGE
     
    270298#endif
    271299
     300#if( DEBUG_MAPPER_GET_PAGE & 2 )
     301if( DEBUG_MAPPER_GET_PAGE < cycle )
     302ppm_remote_display( local_cxy );
     303#endif
     304
    272305    // check thread can yield
    273306    thread_assert_can_yield( this , __FUNCTION__ );
    274307
    275308    // build extended pointer on mapper lock and mapper rt
    276     lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    277     rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
     309    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
     310    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
    278311
    279312    // take mapper lock in READ_MODE
     
    281314
    282315    // search page in radix tree
    283     page_xp  = grdxt_remote_lookup( rt_xp , page_id );
     316    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );
    284317
    285318    // test mapper miss
     
    310343
    311344#if (DEBUG_MAPPER_GET_PAGE & 1)
    312 if( DEBUG_MAPPER_GET_PAGE < cycle )
    313 printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n",
    314 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
     345if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
     346{
     347    printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n",
     348    __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) );
     349}
     350if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
     351{
     352    printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n",
     353    __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
     354}
    315355#endif
    316356       
     
    328368if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
    329369{
    330     printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
    331     __FUNCTION__, this->process->pid, this->trdid, page_id,
    332     name, ppm_page2ppn(page_xp), cycle );
     370    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
     371    __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
    333372}
    334373if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
    335374{
    336     printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
    337     __FUNCTION__, this->process->pid, this->trdid, page_id,
    338     ppm_page2ppn(page_xp), cycle );
    339 }
     375    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x\n",
     376    __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
     377}
     378#endif
     379
     380#if( DEBUG_MAPPER_GET_PAGE & 2)
     381if( DEBUG_MAPPER_GET_PAGE < cycle )
     382ppm_remote_display( local_cxy );
    340383#endif
    341384
     
    476519__FUNCTION__, this->process->pid, this->trdid, page_bytes,
    477520local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
    478 mapper_display_page(  mapper_xp , page_id, 128 );
     521mapper_display_page(  mapper_xp , page_xp , 128 );
    479522#endif
    480523
     
    600643{
    601644    if( to_buffer )
    602     printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
     645    printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",
    603646    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
    604647    else
    605     printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
     648    printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",
    606649    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
    607650}
     
    617660cycle  = (uint32_t)hal_get_cycles();
    618661if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    619 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    620 __FUNCTION__, this->process->pid, this->trdid, cycle );
     662printk("\n[%s] thread[%x,%x] exit / mapper <%s> / buffer (%x,%x) / cycle %d\n",
     663__FUNCTION__, this->process->pid, this->trdid, name, buffer_cxy, buffer_ptr, cycle );
    621664#endif
    622665
     
    707750        if( page == NULL ) break;
    708751
    709 assert( (page->index == found_key ), "wrong page descriptor index" );
    710 assert( (page->order == 0),          "mapper page order must be 0" );
     752assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key );
     753assert( (page->order == 0), "page_order (%d) != 0", page->order );
    711754
    712755        // build extended pointer on page descriptor
     
    753796}  // end mapper_sync()
    754797
    755 //////////////////////////////////////////////////
    756 error_t mapper_display_page( xptr_t     mapper_xp,
    757                              uint32_t   page_id,
    758                              uint32_t   nbytes )
    759 {
    760     xptr_t        page_xp;        // extended pointer on page descriptor
    761     xptr_t        base_xp;        // extended pointer on page base
     798///////////////////////////////////////////////
     799void mapper_display_page( xptr_t     mapper_xp,
     800                          xptr_t     page_xp,
     801                          uint32_t   nbytes )
     802{
    762803    char          buffer[4096];   // local buffer
    763     uint32_t    * tabi;           // pointer on uint32_t to scan buffer
    764804    uint32_t      line;           // line index
    765805    uint32_t      word;           // word index
    766     cxy_t         mapper_cxy;     // mapper cluster identifier
    767     mapper_t    * mapper_ptr;     // mapper local pointer
    768     vfs_inode_t * inode_ptr;      // inode local pointer
    769806 
    770807    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
    771808
    772     if( nbytes > 4096)
    773     {
    774         printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
    775         __FUNCTION__, nbytes );
    776         return -1;
    777     }
    778    
    779     // get extended pointer on page descriptor
    780     page_xp = mapper_remote_get_page( mapper_xp , page_id );
    781 
    782     if( page_xp == XPTR_NULL)
    783     {
    784         printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
    785         __FUNCTION__, page_id );
    786         return -1;
    787     }
    788 
    789     // get cluster and local pointer
    790     mapper_cxy = GET_CXY( mapper_xp );
    791     mapper_ptr = GET_PTR( mapper_xp );
     809assert( (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
     810assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
     811assert( (page_xp   != XPTR_NULL) , "page_xp argument cannot be null");
     812
     813    // get mapper cluster and local pointer
     814    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
     815    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     816
     817    // get page cluster and local pointer
     818    cxy_t    page_cxy = GET_CXY( page_xp );
     819    page_t * page_ptr = GET_PTR( page_xp );
     820
     821    // get page_id and mapper from page descriptor
     822    uint32_t   page_id = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
     823    mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );
     824
     825assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
     826assert( (mapper_ptr == mapper   ) , "inconsistent mapper_xp & page_xp arguments");
    792827
    793828    // get inode
    794     inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     829    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
    795830
    796831    // get inode name
    797     if( inode_ptr == NULL ) strcpy( name , "fat" );
     832    if( inode_ptr == NULL ) strcpy( name , "FAT" );
    798833    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
    799834   
    800835    // get extended pointer on page base
    801     base_xp = ppm_page2base( page_xp );
     836    xptr_t base_xp = ppm_page2base( page_xp );
    802837   
    803838    // copy remote page to local buffer
    804839    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
    805840
     841    // display header
     842    uint32_t * tabi = (uint32_t *)buffer;
     843    printk("\n***** mapper <%s> / page_id %d / cxy %x / mapper %x / buffer %x\n",
     844    name, page_id, mapper_cxy, mapper_ptr, GET_PTR( base_xp ) );
     845
    806846    // display 8 words per line
    807     tabi = (uint32_t *)buffer;
    808     printk("\n***** mapper <%s> / %d bytes in page %d (%x,%x)\n",
    809     name, nbytes, page_id, GET_CXY(base_xp), GET_PTR(base_xp) );
    810847    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    811848    {
     
    815852    }
    816853
    817     return 0;
    818 
    819 }  // end mapper_display_page
    820 
    821 
     854}  // end mapper_display_page()
     855
     856
  • trunk/kernel/mm/mapper.h

    r635 r656  
    6262 *   and the  allocated memory is only released when the mapper/inode is destroyed.
    6363 *
    64  * TODO (1) the mapper being only used to implement the VFS cache(s), the mapper.c
    65  *          and mapper.h file should be trandfered to the fs directory.
    66  * TODO (2) the "type" field in mapper descriptor is redundant and probably unused.
     64 * TODO the "type" field in mapper descriptor is redundant and probably unused.
    6765 ******************************************************************************************/
    6866
     
    161159
    162160/********************************************************************************************
    163  * This function move data between a remote mapper, identified by the <mapper_xp> argument,
    164  * and a localised remote kernel buffer. It can be called by a thread running any cluster.
     161 * This function moves <size> bytes from/to a remote mapper, identified by the <mapper_xp>
     162 * argument, to/from a remote kernel buffer, identified by the <buffer_xp> argument.
     163 * It can be called by a thread running in any cluster.
    165164 * If required, the data transfer is split in "fragments", where one fragment contains
    166  * contiguous bytes in the same mapper page.
    167  * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer.
    168  * In case of write, the dirty bit is set for all pages written in the mapper.
     165 * contiguous bytes in the same mapper page. Each fragment uses a "remote_memcpy".
     166 * In case of write to mapper, the dirty bit is set for all pages written in the mapper.
    169167 *******************************************************************************************
    170168 * @ mapper_xp    : extended pointer on mapper.
     
    248246
    249247/*******************************************************************************************
    250  * This debug function displays the content of a given page of a given mapper.
    251  * - the mapper is identified by the <mapper_xp> argument.
    252  * - the page is identified by the <page_id> argument.
    253  * - the number of bytes to display in page is defined by the <nbytes> argument.
     248 * This debug function displays the content of a given page of a given mapper, identified
     249 * by the <mapper_xp> and <page_xp> arguments.
     250 * The number of bytes to display in page is defined by the <nbytes> argument.
    254251 * The format is eight (32 bits) words per line in hexadecimal.
    255252 * It can be called by any thread running in any cluster.
    256  * In case of miss in mapper, it load the missing page from device to mapper.
    257253 *******************************************************************************************
    258254 * @ mapper_xp  : [in]  extended pointer on the mapper.
    259  * @ page_id    : [in]  page index in file.
    260  * @ nbytes     : [in]  value to be written.
    261  * @ returns 0 if success / return -1 if error.
    262  ******************************************************************************************/
    263 error_t mapper_display_page( xptr_t     mapper_xp,
    264                              uint32_t   page_id,
    265                              uint32_t   nbytes );
     255 * @ page_xp    : [in]  extended pointer on page descriptor.
     256 * @ nbytes     : [in]  number of bytes in page.
     257 * This function does not return a value.
     258 ******************************************************************************************/
     259void mapper_display_page( xptr_t     mapper_xp,
     260                          xptr_t     page_xp,
     261                          uint32_t   nbytes );
    266262
    267263
  • trunk/kernel/mm/page.h

    r635 r656  
    4949 * - The remote_busylock is used to allows any remote thread to atomically
    5050 *   test/modify the forks counter or the flags.
    51  * - The list entry is used to register the page in a free list or in dirty list.
    52  *   The refcount is used for page release to KMEM.
     51 * - The list field is used to register the page in a free list, or in dirty list,
     52 *   as a given page cannot be simultaneously dirty and free.
     53 * - The refcount is used to release the page to the PPM.
    5354 * NOTE: the size is 48 bytes for a 32 bits core.
    5455 ************************************************************************************/
  • trunk/kernel/mm/ppm.c

    r651 r656  
    151151        page_t   * buddy;               // searched buddy page descriptor
    152152        uint32_t   buddy_index;         // buddy page index in page_tbl[]
    153         page_t   * current;             // current (merged) page descriptor
     153        page_t   * current_ptr;         // current (merged) page descriptor
    154154        uint32_t   current_index;       // current (merged) page index in page_tbl[]
    155155        uint32_t   current_order;       // current (merged) page order
     
    168168
    169169    // initialise loop variables
    170     current       = page;
     170    current_ptr   = page;
    171171    current_order = page->order;
    172172        current_index = page - ppm->pages_tbl;
     
    191191                buddy->order = 0;
    192192
    193                 // compute next (merged) page index in page_tbl[]
     193                // compute next values for loop variables
    194194                current_index &= buddy_index;
    195 
    196         // compute next (merged) page order
    197195        current_order++;
    198 
    199         // compute next (merged) page descripror
    200         current = pages_tbl + current_index;
     196        current_ptr = pages_tbl + current_index;
    201197    }
    202198
    203199        // update order field for merged page descriptor
    204         current->order = current_order;
     200        current_ptr->order = current_order;
    205201
    206202        // insert merged page in relevant free list
    207         list_add_first( &ppm->free_pages_root[current_order] , &current->list );
     203        list_add_first( &ppm->free_pages_root[current_order] , &current_ptr->list );
    208204        ppm->free_pages_nr[current_order] ++;
    209205
    210206}  // end ppm_free_pages_nolock()
    211 
    212207
    213208////////////////////////////////////////////
     
    221216    thread_t * this = CURRENT_THREAD;
    222217
     218        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     219
    223220#if DEBUG_PPM_ALLOC_PAGES
    224221uint32_t cycle = (uint32_t)hal_get_cycles();
    225222#endif
    226223
    227 #if (DEBUG_PPM_ALLOC_PAGES & 1)
     224#if DEBUG_PPM_ALLOC_PAGES
    228225if( DEBUG_PPM_ALLOC_PAGES < cycle )
    229226{
    230227    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    231228    __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
    232     ppm_remote_display( local_cxy );
    233 }
    234 #endif
    235 
    236         ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     229    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
     230}
     231#endif
    237232
    238233// check order
     
    316311    dqdt_increment_pages( local_cxy , order );
    317312
     313    hal_fence();
     314
    318315#if DEBUG_PPM_ALLOC_PAGES
    319316if( DEBUG_PPM_ALLOC_PAGES < cycle )
     
    322319    __FUNCTION__, this->process->pid, this->trdid,
    323320    1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
    324     ppm_remote_display( local_cxy );
     321    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
    325322}
    326323#endif
     
    340337#endif
    341338
    342 #if ( DEBUG_PPM_FREE_PAGES & 1 )
     339#if DEBUG_PPM_FREE_PAGES
    343340if( DEBUG_PPM_FREE_PAGES < cycle )
    344341{
     
    346343    __FUNCTION__, this->process->pid, this->trdid,
    347344    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    348     ppm_remote_display( local_cxy );
     345    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
     346}
    349347#endif
    350348
     
    362360    // update DQDT
    363361    dqdt_decrement_pages( local_cxy , page->order );
     362
     363    hal_fence();
    364364
    365365#if DEBUG_PPM_FREE_PAGES
     
    369369    __FUNCTION__, this->process->pid, this->trdid,
    370370    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
    371     ppm_remote_display( local_cxy );
     371    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
    372372}
    373373#endif
     
    376376
    377377
    378 
    379 
    380378/////////////////////////////////////////////
    381 void * ppm_remote_alloc_pages( cxy_t     cxy,
     379xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
    382380                               uint32_t  order )
    383381{
     
    389387    thread_t * this  = CURRENT_THREAD;
    390388
     389// check order
     390assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
     391
     392    // get local pointer on PPM (same in all clusters)
     393        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     394
    391395#if DEBUG_PPM_REMOTE_ALLOC_PAGES
    392396uint32_t   cycle = (uint32_t)hal_get_cycles();
    393397#endif
    394398
    395 #if ( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 )
     399#if DEBUG_PPM_REMOTE_ALLOC_PAGES
    396400if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    397401{
    398     printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
     402    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    399403    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
    400     ppm_remote_display( cxy );
    401 }
    402 #endif
    403 
    404 // check order
    405 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
    406 
    407     // get local pointer on PPM (same in all clusters)
    408         ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     404    if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     405}
     406#endif
    409407
    410408    //build extended pointer on lock protecting remote PPM
     
    489487    dqdt_increment_pages( cxy , order );
    490488
     489    hal_fence();
     490
    491491#if DEBUG_PPM_REMOTE_ALLOC_PAGES
    492492if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
     
    495495    __FUNCTION__, this->process->pid, this->trdid,
    496496    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
    497     ppm_remote_display( cxy );
    498 }
    499 #endif
    500 
    501         return found_block;
     497    if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     498}
     499#endif
     500
     501        return XPTR( cxy , found_block );
    502502
    503503}  // end ppm_remote_alloc_pages()
     
    515515        uint32_t   current_order;    // current (merged) page order
    516516
     517    // get local pointer on PPM (same in all clusters)
     518        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     519
     520    // get page ppn and order
     521    uint32_t   order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
     522
    517523#if DEBUG_PPM_REMOTE_FREE_PAGES
    518524thread_t * this  = CURRENT_THREAD;
    519525uint32_t   cycle = (uint32_t)hal_get_cycles();
    520 #endif
    521 
    522 #if ( DEBUG_PPM_REMOTE_FREE_PAGES & 1 )
     526ppn_t      ppn   = ppm_page2ppn( XPTR( page_cxy , page_ptr ) );
     527#endif
     528
     529#if DEBUG_PPM_REMOTE_FREE_PAGES
    523530if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    524531{
    525532    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    526     __FUNCTION__, this->process->pid, this->trdid,
    527     1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr )), cycle );
    528     ppm_remote_display( page_cxy );
     533    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
     534    if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    529535}
    530536#endif
     
    533539    page_xp = XPTR( page_cxy , page_ptr );
    534540   
    535     // get local pointer on PPM (same in all clusters)
    536         ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    537 
    538541    // build extended pointer on lock protecting remote PPM
    539542    xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );
     
    556559    // initialise loop variables
    557560    current_ptr   = page_ptr;
    558     current_order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
     561    current_order = order;
    559562        current_index = page_ptr - ppm->pages_tbl;
    560563
     
    582585        hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );
    583586
    584                 // compute next (merged) page index in page_tbl[]
     587                // compute next values for loop variables
    585588                current_index &= buddy_index;
    586 
    587         // compute next (merged) page order
    588589        current_order++;
    589 
    590         // compute next (merged) page descripror
    591590        current_ptr = pages_tbl + current_index;
    592591
     
    594593
    595594        // update current (merged) page descriptor order field
    596         current_ptr = pages_tbl + current_index;
    597595    hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );
    598596
    599597        // insert current (merged) page into relevant free list
    600         list_remote_add_first( page_cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
     598        list_remote_add_first( page_cxy, &ppm->free_pages_root[current_order], &current_ptr->list );
    601599    hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );
    602600
     
    607605    dqdt_decrement_pages( page_cxy , page_ptr->order );
    608606
     607    hal_fence();
     608
    609609#if DEBUG_PPM_REMOTE_FREE_PAGES
    610610if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    611611{
    612612    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
    613     __FUNCTION__, this->process->pid, this->trdid,
    614     1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ), cycle );
    615     ppm_remote_display( page_cxy );
     613    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
     614    if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    616615}
    617616#endif
     
    658657        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
    659658
    660         // display direct free_list[order]
    661                 nolock_printk("- forward  : order = %d / n = %d\t: ", order , n );
     659        // display forward free_list[order]
     660                nolock_printk("- forward  : order = %d / n = %d : ", order , n );
    662661                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
     662                {
     663            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
     664                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
     665                }
     666                nolock_printk("\n");
     667
     668        // display backward free_list[order]
     669                nolock_printk("- backward : order = %d / n = %d : ", order , n );
     670                LIST_REMOTE_FOREACH_BACKWARD( cxy , &ppm->free_pages_root[order] , iter )
    663671                {
    664672            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
  • trunk/kernel/mm/ppm.h

    r635 r656  
    8484/*****************************************************************************************
    8585 * This local allocator must be called by a thread running in local cluster.
    86  * It allocates n contiguous physical 4 Kbytes pages from the local cluster, where
    87  * n is a power of 2 defined by the <order> argument.
     86 * It allocates N contiguous physical 4 Kbytes pages from the local cluster, where
     87 * N is a power of 2 defined by the <order> argument.
    8888 * In normal use, it should not be called directly, as the recommended way to allocate
    8989 * physical pages is to call the generic allocator defined in kmem.h.
     
    116116/*****************************************************************************************
    117117 * This remote  allocator can be called by any thread running in any cluster.
    118  * It allocates n contiguous physical 4 Kbytes pages from cluster identified
    119  * by the <cxy> argument, where n is a power of 2 defined by the <order> argument.
     118 * It allocates N contiguous physical 4 Kbytes pages from cluster identified
     119 * by the <cxy> argument, where N is a power of 2 defined by the <order> argument.
    120120 * In normal use, it should not be called directly, as the recommended way to allocate
    121121 * physical pages is to call the generic allocator defined in kmem.h.
     
    123123 * @ cxy       : remote cluster identifier.
    124124 * @ order     : ln2( number of 4 Kbytes pages)
    125  * @ returns a local pointer on remote page descriptor if success / XPTR_NULL if error.
    126  ****************************************************************************************/
    127 void *  ppm_remote_alloc_pages( cxy_t    cxy,
     125 * @ returns an extended pointer on page descriptor if success / XPTR_NULL if error.
     126 ****************************************************************************************/
     127xptr_t  ppm_remote_alloc_pages( cxy_t    cxy,
    128128                                uint32_t order );
    129129
  • trunk/kernel/mm/vmm.c

    r651 r656  
    17451745
    17461746////////////////////////////////////////////////////////////////////////////////////////////
    1747 // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions.
    1748 // Depending on the vseg <type>, it decrements the physical page refcount, and
    1749 // conditionnally release to the relevant kmem the physical page identified by <ppn>.
     1747// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions
     1748// to update the physical page descriptor identified by the <ppn> argument.
     1749// It decrements the refcount, sets the dirty bit when required, and releases the physical
     1750// page to kmem depending on the vseg type.
     1751// - KERNEL : refcount decremented / not released to kmem    / dirty bit not set
     1752// - FILE   : refcount decremented / not released to kmem    / dirty bit set when required.
     1753// - CODE   : refcount decremented / released to kmem        / dirty bit not set.
     1754// - STACK  : refcount decremented / released to kmem        / dirty bit not set.
     1755// - DATA   : refcount decremented / released to kmem if ref / dirty bit not set.
     1756// - MMAP   : refcount decremented / released to kmem if ref / dirty bit not set.
    17501757////////////////////////////////////////////////////////////////////////////////////////////
    17511758// @ process  : local pointer on process.
    17521759// @ vseg     : local pointer on vseg.
     17531760// @ ppn      : released physical page index.
     1761// @ dirty    : set the dirty bit in page descriptor when non zero.
    17541762////////////////////////////////////////////////////////////////////////////////////////////
    17551763static void vmm_ppn_release( process_t * process,
    17561764                             vseg_t    * vseg,
    1757                              ppn_t       ppn )
     1765                             ppn_t       ppn,
     1766                             uint32_t    dirty )
    17581767{
    1759     bool_t do_release;
     1768    bool_t do_kmem_release;
    17601769
    17611770    // get vseg type
    17621771    vseg_type_t type = vseg->type;
    17631772
    1764     // compute is_ref
     1773    // compute is_ref <=> this vseg is the reference vseg
    17651774    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
    17661775
     
    17741783    hal_remote_atomic_add( count_xp , -1 );
    17751784
    1776     // compute the do_release condition depending on vseg type
    1777     if( (type == VSEG_TYPE_FILE)  ||
    1778         (type == VSEG_TYPE_KCODE) ||
     1785    // compute the do_kmem_release condition depending on vseg type
     1786    if( (type == VSEG_TYPE_KCODE) ||
    17791787        (type == VSEG_TYPE_KDATA) ||
    17801788        (type == VSEG_TYPE_KDEV) )           
    17811789    {
    1782         // no physical page release for FILE and KERNEL
    1783         do_release = false;
    1784     }
     1790        // no physical page release for KERNEL
     1791        do_kmem_release = false;
     1792    }
     1793    else if( type == VSEG_TYPE_FILE )
     1794    {
      1795        // no physical page release for FILE
     1796        do_kmem_release = false;
     1797
     1798        // set dirty bit if required
     1799        if( dirty ) ppm_page_do_dirty( page_xp );
     1800    }   
    17851801    else if( (type == VSEG_TYPE_CODE)  ||
    17861802             (type == VSEG_TYPE_STACK) )
    17871803    {
    17881804        // always release physical page for private vsegs
    1789         do_release = true;
     1805        do_kmem_release = true;
    17901806    }
    17911807    else if( (type == VSEG_TYPE_ANON)  ||
     
    17931809    {
    17941810        // release physical page if reference cluster
    1795         do_release = is_ref;
     1811        do_kmem_release = is_ref;
    17961812    }
    17971813    else if( is_ref )  // vseg_type == DATA in reference cluster
     
    18141830
    18151831        // release physical page if forks == 0
    1816         do_release = (forks == 0);
     1832        do_kmem_release = (forks == 0);
    18171833    }
    18181834    else              // vseg_type == DATA not in reference cluster
    18191835    {
    18201836        // no physical page release if not in reference cluster
    1821         do_release = false;
     1837        do_kmem_release = false;
    18221838    }
    18231839
    18241840    // release physical page to relevant kmem when required
    1825     if( do_release )
    1826     {
    1827         ppm_remote_free_pages( page_cxy , page_ptr );
     1841    if( do_kmem_release )
     1842    {
     1843        kmem_req_t req;
     1844        req.type = KMEM_PPM;
     1845        req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );
     1846
     1847        kmem_remote_free( page_cxy , &req );
    18281848
    18291849#if DEBUG_VMM_PPN_RELEASE
     
    18921912            hal_gpt_reset_pte( gpt_xp , vpn );
    18931913
    1894             // release physical page when required
    1895             vmm_ppn_release( process , vseg , ppn );
     1914            // release physical page depending on vseg type
     1915            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    18961916        }
    18971917    }
     
    19862006
    19872007            // release physical page when required
    1988             vmm_ppn_release( process , vseg , ppn );
     2008            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    19892009        }
    19902010    }
     
    20082028
    20092029            // release physical page when required
    2010             vmm_ppn_release( process , vseg , ppn );
     2030            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    20112031        }
    20122032    }
     
    21702190// @ vseg   : local pointer on vseg.
    21712191// @ vpn    : unmapped vpn.
    2172 // @ return an extended pointer on the allocated page
     2192// @ return an extended pointer on the allocated page descriptor.
    21732193//////////////////////////////////////////////////////////////////////////////////////
    21742194static xptr_t vmm_page_allocate( vseg_t * vseg,
     
    21862206    xptr_t       page_xp;
    21872207    cxy_t        page_cxy;
    2188     page_t     * page_ptr;
    21892208    uint32_t     index;
    21902209
     
    21972216assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
    21982217
     2218    // compute target cluster identifier
    21992219    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
    22002220    {
     
    22142234
    22152235    // allocate one small physical page from target cluster
    2216     page_ptr = ppm_remote_alloc_pages( page_cxy , 0 );
    2217 
    2218     page_xp = XPTR( page_cxy , page_ptr );
     2236    kmem_req_t req;
     2237    req.type  = KMEM_PPM;
     2238    req.order = 0;
     2239    req.flags = AF_ZERO;
     2240
     2241    // get local pointer on page base
     2242    void * ptr = kmem_remote_alloc( page_cxy , &req );
     2243
     2244    // get extended pointer on page descriptor
     2245    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );
    22192246
    22202247#if DEBUG_VMM_PAGE_ALLOCATE
     
    22452272uint32_t   cycle = (uint32_t)hal_get_cycles();
    22462273thread_t * this  = CURRENT_THREAD;
    2247 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
    2248 printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id  %d / cycle %d\n",
     2274if( DEBUG_VMM_GET_ONE_PPN < cycle )
     2275printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id  %d / cycle %d\n",
    22492276__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
     2277#endif
     2278
     2279#if (DEBUG_VMM_GET_ONE_PPN & 2)
     2280if( DEBUG_VMM_GET_ONE_PPN < cycle )
     2281hal_vmm_display( XPTR( local_cxy , this->process ) , true );
    22502282#endif
    22512283
     
    22912323
    22922324#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2293 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2325if( DEBUG_VMM_GET_ONE_PPN < cycle )
    22942326printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
    22952327__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
     
    23052337
    23062338#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2307 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2339if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23082340printk("\n[%s] thread[%x,%x] for vpn  %x / fully in BSS\n",
    23092341__FUNCTION__, this->process->pid, this->trdid, vpn );
     
    23222354
    23232355#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2324 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2356if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23252357printk("\n[%s] thread[%x,%x] for vpn  %x / fully in mapper\n",
    23262358__FUNCTION__, this->process->pid, this->trdid, vpn );
     
    23392371
    23402372#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2341 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2373if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23422374printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
    23432375"      %d bytes from mapper / %d bytes from BSS\n",
     
    23652397                }
    23662398            }   
    2367         }  // end initialisation for CODE or DATA types   
     2399
     2400        }  // end if CODE or DATA types   
    23682401    }
    23692402
     
    23722405
    23732406#if DEBUG_VMM_GET_ONE_PPN
    2374 cycle = (uint32_t)hal_get_cycles();
    2375 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2407if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23762408printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
    23772409__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
     2410#endif
     2411
     2412#if (DEBUG_VMM_GET_ONE_PPN & 2)
     2413if( DEBUG_VMM_GET_ONE_PPN < cycle )
     2414hal_vmm_display( XPTR( local_cxy , this->process ) , true );
    23782415#endif
    23792416
     
    24042441
    24052442#if DEBUG_VMM_HANDLE_PAGE_FAULT
    2406 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2443if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) & (vpn > 0) )
    24072444printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
    24082445__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
    24092446#endif
    24102447
    2411 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
     2448#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
    24122449if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
    2413 hal_vmm_display( this->process , true );
     2450hal_vmm_display( XPTR( local_cxy , this->process ) , true );
    24142451#endif
    24152452
     
    25042541#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    25052542uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2506 uint32_t cost      = end_cycle - start_cycle;
    25072543#endif
    25082544
     
    25132549#endif
    25142550
     2551#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
     2552if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2553hal_vmm_display( XPTR( local_cxy , this->process ) , true );
     2554#endif
     2555
    25152556#if CONFIG_INSTRUMENTATION_PGFAULTS
     2557uint32_t cost      = end_cycle - start_cycle;
    25162558this->info.local_pgfault_nr++;
    25172559this->info.local_pgfault_cost += cost;
     
    25842626#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    25852627uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2586 uint32_t cost      = end_cycle - start_cycle;
    25872628#endif
    25882629
     
    25932634#endif
    25942635
     2636#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
     2637if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2638hal_vmm_display( XPTR( local_cxy , this->process ) , true );
     2639#endif
     2640
    25952641#if CONFIG_INSTRUMENTATION_PGFAULTS
     2642uint32_t cost      = end_cycle - start_cycle;
    25962643this->info.false_pgfault_nr++;
    25972644this->info.false_pgfault_cost += cost;
     
    26512698#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    26522699uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2653 uint32_t cost      = end_cycle - start_cycle;
    26542700#endif
    26552701
     
    26602706#endif
    26612707
     2708#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
     2709if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2710hal_vmm_display( XPTR( local_cxy , this->process ) , true );
     2711#endif
     2712
    26622713#if CONFIG_INSTRUMENTATION_PGFAULTS
     2714uint32_t cost      = end_cycle - start_cycle;
    26632715this->info.global_pgfault_nr++;
    26642716this->info.global_pgfault_cost += cost;
     
    26762728#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    26772729uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2678 uint32_t cost      = end_cycle - start_cycle;
    26792730#endif
    26802731
     
    26862737
    26872738#if CONFIG_INSTRUMENTATION_PGFAULTS
     2739uint32_t cost      = end_cycle - start_cycle;
    26882740this->info.false_pgfault_nr++;
    26892741this->info.false_pgfault_cost += cost;
     
    27202772#endif
    27212773
    2722 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3 )
     2774#if (DEBUG_VMM_HANDLE_COW & 2)
    27232775hal_vmm_display( XPTR( local_cxy , process ) , true );
    27242776#endif
     
    29022954#endif
    29032955
    2904 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3)
     2956#if (DEBUG_VMM_HANDLE_COW & 2)
    29052957hal_vmm_display( XPTR( local_cxy , process ) , true );
    29062958#endif
  • trunk/kernel/mm/vmm.h

    r651 r656  
    312312
    313313/*********************************************************************************************
    314  * This function removes from the VMM of a process descriptor identified by the <process>
    315  * argument the vseg identified by the <vseg> argument. 
    316  * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions.
     314 * This function removes from the VMM of a local process descriptor, identified by
     315 * the <process> argument, the vseg identified by the <vseg> argument. 
     316 * It is called by the vmm_user_reset(), vmm_global_delete_vseg(), vmm_destroy() functions.
    317317 * It must be called by a local thread, running in the cluster containing the modified VMM.
    318318 * Use the RPC_VMM_REMOVE_VSEG if required.
     
    324324 *   . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
    325325 *   . for STACK the vseg is released to the local stack allocator.
    326  *   . for all other types, the vseg is released to the local kmem.
     326 *   . for all other types, the vseg descriptor is released to the local kmem.
    327327 * Regarding the physical pages release:
    328328 *   . for KERNEL and FILE, the pages are not released to kmem.
    329  *   . for CODE and STACK, the pages are released to local kmem when they are not COW.
     329 *   . for CODE and STACK, the pages are released to local kmem.
    330330 *   . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
    331331 *     the local cluster is the reference cluster.
Note: See TracChangeset for help on using the changeset viewer.