Changeset 18 for trunk


Timestamp: Jun 3, 2017, 4:42:49 PM (8 years ago)
Author: max@…
Message: cosmetic, and a few typos
Location: trunk/kernel/mm
Files: 15 edited

  • trunk/kernel/mm/kcm.c

    r14 r18  
    11/*
    22 * kcm.c - Per cluster & per type Kernel Cache Manager access functions
    3  * 
     3 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    55 *         Alain Greiner    (2016)
     
    5050
    5151    // get first block available
    52         int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr ); 
     52        int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr );
    5353
    5454    assert( (index != -1) , __FUNCTION__ , "kcm page should not be full" );
    55  
     55
    5656    // allocate block
    5757        bitmap_clear( page->bitmap , index );
     
    8787{
    8888        kcm_page_t * page;
    89     uint32_t     index; 
    90  
     89    uint32_t     index;
     90
    9191        page = (kcm_page_t*)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    9292        index = ((uint8_t*)ptr - page->base) / kcm->block_size;
    93  
     93
    9494        bitmap_set( page->bitmap , index );
    9595        page->refcount --;
    96  
     96
    9797    // change the page to active if it was busy
    9898        if( page->busy )
     
    121121/////////////////////////////////////////////////////////////////////////////////////
    122122// This static function allocates one page from PPM. It initializes
    123 // the KCM-page descriptor, and introduces the new page into the freelist.
     123// the KCM-page descriptor, and introduces the new page into the freelist.
    124124/////////////////////////////////////////////////////////////////////////////////////
    125125static error_t freelist_populate( kcm_t * kcm )
     
    134134    req.flags = AF_KERNEL;
    135135    page = kmem_alloc( &req );
    136  
     136
    137137        if( page == NULL )
    138138        {
    139                 printk("\n[ERROR] in %s : failed to allocate page in cluster %d\n", 
     139                printk("\n[ERROR] in %s : failed to allocate page in cluster %d\n",
    140140               __FUNCTION__ , local_cxy );
    141141        return ENOMEM;
     
    158158        list_add_first( &kcm->free_root , &ptr->list );
    159159        kcm->free_pages_nr ++;
    160  
     160
    161161        return 0;
    162162
     
    179179        }
    180180
    181     // get first KCM page from freelist and change its status to active 
     181    // get first KCM page from freelist and change its status to active
    182182        page = LIST_FIRST( &kcm->free_root, kcm_page_t , list );
    183183        list_unlink( &page->list );
     
    200200        spinlock_init( &kcm->lock );
    201201
    202     // initialize KCM type 
     202    // initialize KCM type
    203203        kcm->type = type;
    204204
     
    219219        kcm->blocks_nr  = blocks_nr;
    220220        kcm->block_size = block_size;
    221      
     221
    222222    kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
    223223             __FUNCTION__ , kmem_type_str( type ) , block_size , blocks_nr );
     
    230230        kcm_page_t   * page;
    231231        list_entry_t * iter;
    232  
     232
    233233    // get KCM lock
    234234        spinlock_lock( &kcm->lock );
     
    274274    // get lock
    275275        spinlock_lock( &kcm->lock );
    276    
     276
    277277    // get an active page
    278278    if( list_is_empty( &kcm->active_root ) )  // no active page => get one
     
    303303    ptr  = kcm_get_block( kcm , page );
    304304
    305     // release lock 
     305    // release lock
    306306        spinlock_unlock(&kcm->lock);
    307307
     
    318318        kcm_page_t * page;
    319319        kcm_t      * kcm;
    320  
     320
    321321        if( ptr == NULL ) return;
    322        
     322
    323323        page = (kcm_page_t *)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    324324        kcm  = page->kcm;
     
    330330        kcm_put_block( kcm , ptr );
    331331
    332     // release lock 
     332    // release lock
    333333        spinlock_unlock( &kcm->lock );
    334334}
     
    338338{
    339339        printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",
    340            kmem_type_str( kcm->type ) , 
    341            kcm->free_pages_nr , 
     340           kmem_type_str( kcm->type ) ,
     341           kcm->free_pages_nr ,
    342342           kcm->busy_pages_nr ,
    343343           kcm->active_pages_nr );
  • trunk/kernel/mm/kcm.h

    r7 r18  
    3939 * contain one single object.
    4040 * The various KCM allocators themselves are not statically allocated in the cluster
    41  * manager, but are dynamically allocated when required, using the embedded KCM 
     41 * manager, but are dynamically allocated when required, using the embedded KCM
    4242 * allocator defined in the cluster manager, to allocate the other ones...
    4343 ***************************************************************************************/
    4444
    45 typedef struct kcm_s                     
     45typedef struct kcm_s
    4646{
    4747        spinlock_t           lock;             /*! protect exclusive access to allocator   */
     
    5858
    5959    uint32_t             type;             /*! KCM type                                */
    60 } 
     60}
    6161kcm_t;
    6262
     
    7979        uint8_t         active;                /*! page active if non zero                 */
    8080        uint8_t         unused;                /*!                                         */
    81 } 
     81}
    8282kcm_page_t;
    8383
    8484/****************************************************************************************
    85  * This function initializes a generic Kernel Cache Manager. 
     85 * This function initializes a generic Kernel Cache Manager.
    8686 ****************************************************************************************
    8787 * @ kcm      : pointer on KCM manager to initialize.
     
    9292
    9393/****************************************************************************************
    94  * This function releases all memory allocated to a generic Kernel Cache Manager. 
     94 * This function releases all memory allocated to a generic Kernel Cache Manager.
    9595 ****************************************************************************************
    9696 * @ kcm      : pointer on KCM manager to destroy.
     
    9999
    100100/****************************************************************************************
    101  * This function allocates one single object in a Kernel Cache Manager 
     101 * This function allocates one single object in a Kernel Cache Manager
    102102 * The object size must be smaller than one page size.
    103103 ****************************************************************************************
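
The kcm.c hunks above show the heart of the KCM scheme: each KCM page carries a
small bitmap in which a set bit marks a free block, so allocation is a
bitmap_ffs() followed by bitmap_clear(), and release is a bitmap_set(). The
following standalone sketch restates that mechanism; the demo_ names, the
single-word bitmap and the 32-block limit are illustrative assumptions, not the
kernel's actual bitmap API (which also takes the number of blocks, as in the
hunk).

    #include <stdint.h>

    #define DEMO_BLOCKS_NR 32         /* assumption : at most 32 blocks per page */

    typedef struct demo_kcm_page_s
    {
        uint32_t bitmap;              /* bit i set <=> block i is free           */
        uint32_t refcount;            /* number of allocated blocks in the page  */
    } demo_kcm_page_t;

    /* analogue of bitmap_ffs() : index of first set bit / -1 if none is set */
    static int32_t demo_bitmap_ffs( uint32_t map )
    {
        for( int32_t i = 0 ; i < DEMO_BLOCKS_NR ; i++ )
        {
            if( map & (1U << i) ) return i;
        }
        return -1;
    }

    /* analogue of kcm_get_block() : take the first free block of the page */
    static int32_t demo_get_block( demo_kcm_page_t * page )
    {
        int32_t index = demo_bitmap_ffs( page->bitmap );

        if( index == -1 ) return -1;       /* "kcm page should not be full"    */

        page->bitmap &= ~(1U << index);    /* bitmap_clear() : block now busy  */
        page->refcount ++;
        return index;
    }

    /* analogue of kcm_put_block() : mark the block free again */
    static void demo_put_block( demo_kcm_page_t * page , int32_t index )
    {
        page->bitmap |= (1U << index);     /* bitmap_set() : block free again  */
        page->refcount --;
    }
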
  • trunk/kernel/mm/khm.c

    r14 r18  
    11/*
    22 * khm.c - kernel heap manager implementation.
    3  * 
     3 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    55 *          Alain Greiner (2016)
     
    4444
    4545    // initialize lock
    46         spinlock_init( &khm->lock ); 
    47    
    48     //  compute kernel heap size
     46        spinlock_init( &khm->lock );
     47
     48    // compute kernel heap size
    4949    intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_SHIFT;
    5050
    51     // get kernel heap base from PPM 
     51    // get kernel heap base from PPM
    5252    page_t * page      = ppm_alloc_pages( CONFIG_PPM_HEAP_ORDER );
    5353    void   * heap_base = ppm_page2base( page );
     
    6565
    6666/////////////////////////////////
    67 void * khm_alloc( khm_t    * khm, 
     67void * khm_alloc( khm_t    * khm,
    6868                  uint32_t   size )
    6969{
    70         khm_block_t  * current;       
     70        khm_block_t  * current;
    7171        khm_block_t  * next;
    7272        uint32_t       effective_size;
     
    7878    // get lock protecting heap
    7979        spinlock_lock( &khm->lock );
    80  
    81     // define a starting block to scan existing blocks 
     80
     81    // define a starting block to scan existing blocks
    8282    if( ((khm_block_t*)khm->next)->size < effective_size ) current = (khm_block_t*)khm->base;
    8383    else                                                   current = (khm_block_t*)khm->next;
    8484
    8585    // scan all existing blocks to find a large enough free block
    86         while( current->busy || (current->size < effective_size)) 
     86        while( current->busy || (current->size < effective_size))
    8787        {
    8888        // get next block pointer
    8989                current = (khm_block_t*)((char*)current + current->size);
    90    
     90
    9191                if( (intptr_t)current >= (khm->base + khm->size) )  // heap full
    9292                {
    9393                        spinlock_unlock(&khm->lock);
    9494
    95                         printk("\n[ERROR] in %s : failed to allocate block of size %d\n", 
     95                        printk("\n[ERROR] in %s : failed to allocate block of size %d\n",
    9696                               __FUNCTION__ , effective_size );
    9797                        return NULL;
     
    133133        khm_block_t * current;
    134134        khm_block_t * next;
    135  
     135
    136136        if(ptr == NULL) return;
    137  
     137
    138138        current = (khm_block_t *)((char*)ptr - sizeof(khm_block_t));
    139  
     139
    140140    // get lock protecting heap
    141141        spinlock_lock(&khm->lock);
    142142
    143     // release block 
     143    // release block
    144144        current->busy = 0;
    145  
     145
    146146    // try to merge released block with the next
    147147        while ( 1 )
    148         { 
     148        {
    149149        next = (khm_block_t*)((char*)current + current->size);
    150150                if ( ((intptr_t)next >= (khm->base + khm->size)) || (next->busy == 1) ) break;
     
    153153
    154154        if( (intptr_t)current < khm->next ) khm->next = (intptr_t)current;
    155  
     155
    156156    // release lock protecting heap
    157157        spinlock_unlock( &khm->lock );
  • trunk/kernel/mm/khm.h

    r14 r18  
    11/*
    22 * khm.h - kernel heap manager used for variable size memory allocation.
    3  * 
     3 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    55 *          Mohamed Lamine Karaoui (2015)
     
    3333/*******************************************************************************************
    3434 * This structure defines a Kernel Heap Manager (KHM) in a given cluster.
    35  * It is used to allocate memory objects that are not
     35 * It is used to allocate memory objects that are not
    3636 * replicated enough to justify a dedicated KCM allocator.
    3737 ******************************************************************************************/
     
    4848/*******************************************************************************************
    4949 * This structure defines an allocated block descriptor for the KHM.
    50  * This block descriptor is stored at the beginning of the allocated block. 
     50 * This block descriptor is stored at the beginning of the allocated block.
    5151 * The returned pointer is the allocated memory block base + block descriptor size.
    5252 ******************************************************************************************/
     
    5656        uint32_t   busy:1;         /*! free block if zero                                     */
    5757        uint32_t   size:31;        /*! size coded on 31 bits                                  */
    58 } 
     58}
    5959khm_block_t;
    6060
     
    6262/*******************************************************************************************
    6363 * This function initializes a KHM heap manager in a given cluster.
    64  * It is used to allocate variable size memory objects that are not
     64 * It is used to allocate variable size memory objects that are not
    6565 * replicated enough to justify a dedicated KCM allocator.
    6666 *******************************************************************************************
     
    7272
    7373/*******************************************************************************************
    74  * This function allocates a memory block from the local KHM. 
     74 * This function allocates a memory block from the local KHM.
    7575 * The actual size of the allocated block is the requested size, plus the block descriptor
    7676 * size, rounded to a cache line size.
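
The khm.c and khm.h hunks describe a classic first-fit heap: every block starts
with a one-word descriptor (busy:1, size:31), khm_alloc() scans the heap by
jumping from header to header, and khm_free() merges the released block with
any free successors. The sketch below restates that layout under simplified
assumptions (one static heap, no spinlock, no khm->next hint, word rounding
instead of cache-line rounding); it is an illustration, not the kernel code.

    #include <stdint.h>
    #include <stddef.h>

    #define DEMO_HEAP_SIZE 4096

    typedef struct demo_block_s
    {
        uint32_t busy:1;              /* free block if zero                     */
        uint32_t size:31;             /* total block size, descriptor included  */
    } demo_block_t;

    static uint32_t demo_heap[DEMO_HEAP_SIZE / 4];   /* word-aligned heap area  */

    static void demo_heap_init( void )
    {
        demo_block_t * first = (demo_block_t *)demo_heap;
        first->busy = 0;
        first->size = DEMO_HEAP_SIZE;                /* one single free block   */
    }

    static void * demo_alloc( uint32_t size )
    {
        /* actual size = requested size + descriptor, rounded (here to a word) */
        uint32_t effective = (size + sizeof(demo_block_t) + 3) & ~3U;

        /* first-fit scan, as in khm_alloc() */
        demo_block_t * cur = (demo_block_t *)demo_heap;
        while( cur->busy || (cur->size < effective) )
        {
            cur = (demo_block_t *)((uint8_t *)cur + cur->size);
            if( (uint8_t *)cur >= (uint8_t *)demo_heap + DEMO_HEAP_SIZE )
                return NULL;                         /* heap full               */
        }

        /* split the block when the remainder can hold a descriptor */
        uint32_t remaining = cur->size - effective;
        if( remaining > sizeof(demo_block_t) )
        {
            demo_block_t * rest = (demo_block_t *)((uint8_t *)cur + effective);
            rest->busy = 0;
            rest->size = remaining;
            cur->size  = effective;
        }
        cur->busy = 1;

        /* returned pointer = block base + descriptor size, as documented above */
        return (uint8_t *)cur + sizeof(demo_block_t);
    }

    static void demo_free( void * ptr )
    {
        if( ptr == NULL ) return;

        demo_block_t * cur = (demo_block_t *)((uint8_t *)ptr - sizeof(demo_block_t));
        cur->busy = 0;

        /* merge the released block with its free successors, as in khm_free() */
        while( 1 )
        {
            demo_block_t * next = (demo_block_t *)((uint8_t *)cur + cur->size);
            if( ((uint8_t *)next >= (uint8_t *)demo_heap + DEMO_HEAP_SIZE) ||
                (next->busy == 1) ) break;
            cur->size += next->size;
        }
    }
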
  • trunk/kernel/mm/kmem.c

    r14 r18  
    6363        if( kcm != NULL )
    6464        {
    65             if( index == kcm->type ) 
     65            if( index == kcm->type )
    6666            {
    6767                printk("     - KCM[%s] (at address %x) is OK\n",
    6868                       kmem_type_str( index ) , (intptr_t)kcm );
    6969            }
    70             else 
     70            else
    7171            {
    7272                printk("     - KCM[%s] (at address %x) is KO : has type %s\n",
    7373                       kmem_type_str( index ) , (intptr_t)kcm , kmem_type_str( kcm->type ) );
    74             }           
     74            }
    7575        }
    7676    }
     
    100100    else if( type == KMEM_SEM )           return sizeof( remote_sem_t );
    101101    else                                  return 0;
    102 } 
     102}
    103103
    104104/////////////////////////////////////
     
    121121    else if( type == KMEM_VFS_CTX )       return "KMEM_VFS_CTX";
    122122    else if( type == KMEM_VFS_INODE )     return "KMEM_VFS_INODE";
    123     else if( type == KMEM_VFS_DENTRY )    return "KMEM_VFS_DENTRY"; 
     123    else if( type == KMEM_VFS_DENTRY )    return "KMEM_VFS_DENTRY";
    124124    else if( type == KMEM_VFS_FILE )      return "KMEM_VFS_FILE";
    125125    else if( type == KMEM_SEM )           return "KMEM_SEM";
     
    153153    }
    154154
    155     // initializes the new KCM allocator 
     155    // initializes the new KCM allocator
    156156        kcm_init( kcm , type );
    157157
     
    168168}  // end kmem_create_kcm()
    169169
    170  
     170
    171171
    172172/////////////////////////////////////
     
    184184        size  = req->size;
    185185        flags = req->flags;
    186  
     186
    187187        assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
    188  
     188
    189189        kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s / size %d\n",
    190190                      __FUNCTION__ , local_cxy , kmem_type_str( type ) , size );
     
    192192    // analyse request type
    193193        if( type ==  KMEM_PAGE )                       // PPM allocator
    194     {       
     194    {
    195195        // allocate the number of requested pages
    196196                ptr = (void *)ppm_alloc_pages( size );
     
    198198        // reset page if required
    199199                if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
    200    
     200
    201201        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / page = %x / base = %x\n",
    202                   __FUNCTION__, local_cxy , kmem_type_str( type ) , 
     202                  __FUNCTION__, local_cxy , kmem_type_str( type ) ,
    203203                  (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
    204204        }
     
    218218        // initialize the KCM allocator if not already done
    219219            if( cluster->kcm_tbl[type] == NULL )
    220             { 
     220            {
    221221            spinlock_lock( &cluster->kcm_lock );
    222222                        error_t error = kmem_create_kcm( type );
     
    225225            }
    226226
    227         // allocate memory from KCM 
     227        // allocate memory from KCM
    228228        ptr = kcm_alloc( cluster->kcm_tbl[type] );
    229229
     
    237237    if( ptr == NULL )
    238238    {
    239             printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 
     239            printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n",
    240240               __FUNCTION__ , type , size , local_cxy );
    241  
     241
    242242            return NULL;
    243243    }
     
    255255        hal_core_sleep();
    256256    }
    257  
     257
    258258        switch(req->type)
    259259        {
  • trunk/kernel/mm/kmem.h

    r7 r18  
    6262
    6363/*************************************************************************************
    64  * This defines the generic Allocation Flags that can be associated to 
     64 * This defines the generic Allocation Flags that can be associated to
    6565 * a Kernel Memory Request.
    6666 ************************************************************************************/
    6767
    6868#define AF_NONE       0x0000   // no attributes
    69 #define AF_KERNEL     0x0001   // for kernel use   
     69#define AF_KERNEL     0x0001   // for kernel use
    7070#define AF_ZERO       0x0002   // must be reset to 0
    7171
    7272/*************************************************************************************
    73  * This structure defines a Kernel Memory Request. 
     73 * This structure defines a Kernel Memory Request.
    7474 ************************************************************************************/
    7575
     
    8080    uint32_t      flags;  /*! request attributes                                    */
    8181    void        * ptr;    /*! local pointer on allocated buffer (only used by free) */
    82 } 
     82}
    8383kmem_req_t;
    8484
     
    124124
    125125/*************************************************************************************
    126  * This functions display the content of the KCM pointers Table
     126 * This function displays the content of the KCM pointers Table
    127127 ************************************************************************************/
    128128void kmem_print_kcm_table();
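
The kmem.c hunks show how these declarations are used: a caller fills a
kmem_req_t and passes it to kmem_alloc(), which routes KMEM_PAGE requests to
the PPM (the size field is passed straight to ppm_alloc_pages(), so it is an
order, not a byte count) and all other types to a lazily created per-type KCM.
A hedged call-site sketch, with error handling abridged:

    /* allocate one zeroed physical page for kernel use (illustration only) */
    static page_t * demo_alloc_zeroed_page( void )
    {
        kmem_req_t req;

        req.type  = KMEM_PAGE;            /* served by the PPM branch            */
        req.size  = 0;                    /* order 0 : one single page           */
        req.flags = AF_KERNEL | AF_ZERO;  /* AF_ZERO triggers page_zero()        */

        return (page_t *)kmem_alloc( &req );   /* NULL on allocation failure     */
    }
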
  • trunk/kernel/mm/mapper.c

    r14 r18  
    105105        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );
    106106
    107         if( page != NULL ) 
     107        if( page != NULL )
    108108        {
    109109            // remove page from mapper and release to PPM
     
    127127
    128128    return 0;
    129  
     129
    130130}  // end mapper_destroy()
    131131
     
    147147    page = (page_t *)grdxt_lookup( &mapper->radix , index );
    148148
    149     // test if page available in mapper 
     149    // test if page available in mapper
    150150    if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
    151151    {
     
    167167            req.flags = AF_NONE;
    168168            page = kmem_alloc( &req );
    169      
     169
    170170            if( page == NULL )
    171171            {
     
    190190            rwlock_wr_unlock( &mapper->lock );
    191191
    192             if( error ) 
     192            if( error )
    193193            {
    194194                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
     
    201201                return NULL;
    202202            }
    203        
     203
    204204            // launch I/O operation to load page from file system
    205205            error = mapper_updt_page( mapper , index , page );
     
    236236                if(  page_is_flag( page , PG_INLOAD ) ) break;
    237237
    238                 // deschedule 
     238                // deschedule
    239239                sched_yield();
    240240            }
    241          
     241
    242242        }
    243243
    244244        return page;
    245245    }
    246     else 
     246    else
    247247    {
    248248         // release lock from READ_MODE
     
    268268        return EIO;
    269269    }
    270        
     270
    271271    // take mapper lock in WRITE_MODE
    272272    rwlock_wr_lock( &mapper->lock );
     
    331331    rwlock_wr_unlock( &mapper->lock );
    332332
    333     // release page lock 
     333    // release page lock
    334334    page_unlock( page );
    335335
     
    339339        return EIO;
    340340    }
    341        
     341
    342342    return 0;
    343343}  // end mapper_updt_page
     
    364364    }
    365365
    366         if( page_is_flag( page , PG_DIRTY ) ) 
     366        if( page_is_flag( page , PG_DIRTY ) )
    367367        {
    368368        // get file system type and inode pointer
     
    388388        rwlock_rd_unlock( &mapper->lock );
    389389
    390         // release page lock 
     390        // release page lock
    391391        page_unlock( page );
    392392
     
    396396            return EIO;
    397397        }
    398    
     398
    399399        // clear dirty bit
    400400                page_undo_dirty( page );
    401401     }
    402        
     402
    403403    return 0;
    404404
     
    408408// This static function is called by the mapper_move fragments() function.
    409409// It moves one fragment between a user buffer and the kernel mapper.
    410 // Implementation Note: It can require access to one or two pages in mapper: 
     410// Implementation Note: It can require access to one or two pages in mapper:
    411411//  [max_page_index == min_page_index]     <=>  fragment fits in one mapper page
    412412//  [max_page_index == min_page_index + 1] <=>  fragment spreads over two mapper pages
     
    416416                                         fragment_t * fragment )
    417417{
    418     uint32_t   size;                 // number of bytes in fragment 
     418    uint32_t   size;                 // number of bytes in fragment
    419419    cxy_t      buf_cxy;              // cluster identifier for user buffer
    420420    uint8_t  * buf_ptr;              // local pointer on first byte in user buffer
    421    
     421
    422422    xptr_t     xp_buf;               // extended pointer on byte in user buffer
    423423    xptr_t     xp_map;               // extended pointer on byte in kernel mapper
    424424
    425     uint32_t   min_file_offset;      // offset of first byte in file 
    426     uint32_t   max_file_offset;      // offset of last byte in file   
     425    uint32_t   min_file_offset;      // offset of first byte in file
     426    uint32_t   max_file_offset;      // offset of last byte in file
    427427
    428428    uint32_t   first_page_index;     // index of first page in mapper
    429     uint32_t   first_page_offset;    // offset of first byte in first page in mapper
    430     uint32_t   first_page_size;      // number of bytes in first page in mapper
    431
    432     uint32_t   second_page_index;    // index of last page in mapper
    433     uint32_t   second_page_offset;   // offset of first byte in last page in mapper
    434     uint32_t   second_page_size;     // number of bytes in last page in mapper
    435
     429    uint32_t   first_page_offset;    // offset of first byte in first page in mapper
     430    uint32_t   first_page_size;      // number of bytes in first page in mapper
     431
     432    uint32_t   second_page_index;    // index of last page in mapper
     433    uint32_t   second_page_offset;   // offset of first byte in last page in mapper
     434    uint32_t   second_page_size;     // number of bytes in last page in mapper
     435
    436436    page_t   * page;                 // pointer on one page descriptor in mapper
    437437    uint8_t  * map_ptr;              // local pointer on first byte in mapper
     
    448448        return EINVAL;
    449449    }
    450    
     450
    451451    // compute offsets of first and last bytes in file
    452452    min_file_offset = fragment->file_offset;
     
    459459    if ( first_page_index == second_page_index )  // only one page in mapper
    460460    {
    461         // compute offset and size for page in mapper 
     461        // compute offset and size for page in mapper
    462462        first_page_offset = min_file_offset & (1<<CONFIG_PPM_PAGE_SHIFT);
    463463        first_page_size   = size;
     
    499499        // compute local pointer on first byte in first page in mapper
    500500        map_ptr = (uint8_t *)ppm_page2base(page) + first_page_offset;
    501    
     501
    502502        // compute extended pointers
    503503        xp_map = XPTR( local_cxy , map_ptr );
     
    517517        // compute offset and size for second page in mapper
    518518        second_page_offset = 0;
    519         second_page_size   = size - first_page_size; 
     519        second_page_size   = size - first_page_size;
    520520
    521521        // get pointer on second page in mapper
     
    526526        // compute local pointer on first byte in second page in mapper
    527527        map_ptr = (uint8_t *)ppm_page2base( page ) + second_page_offset;
    528    
     528
    529529        // compute extended pointers
    530530        xp_map = XPTR( local_cxy , map_ptr );
     
    573573        frags_array = client_frags;
    574574    }
    575     else                           // make a local copy of fragments array 
     575    else                           // make a local copy of fragments array
    576576    {
    577577        hal_remote_memcpy( XPTR( local_cxy , local_frags ) , xp_frags ,
  • trunk/kernel/mm/mapper.h

    r1 r18  
    3838
    3939/*******************************************************************************************
    40  * The mapper implement the kernel cache for a given file or directory.
     40 * The mapper implements the kernel cache for a given file or directory.
    4141 * There is one mapper per file. It is implemented as a three levels radix tree,
    4242 * entirely stored in the same cluster as the inode representing the file/dir.
    43  * - The fast retrieval key is the page index in the file. 
     43 * - The fast retrieval key is the page index in the file.
    4444 *   The ix1_width, ix2_width, ix3_width sub-indexes are configuration parameters.
    4545 * - The leaves are pointers on physical page descriptors, dynamically allocated
     
    5252 *   used to move pages to or from the relevant file system on IOC device.
    5353 * - the mapper_move fragments() function is used to move data to or from a distributed
    54  *   user buffer. 
     54 *   user buffer.
    5555 * - The mapper_get_page() function that returns a page descriptor pointer from a page
    5656 *   index in file is in charge of handling the miss on the mapper cache.
     
    8080 * and a user buffer, that can be split into several distributed physical pages located
    8181 * in different clusters. A fragment is a set of contiguous bytes in the file.
    82  * - It can be stored in one single physical page in the user buffer. 
     82 * - It can be stored in one single physical page in the user buffer.
    8383 * - It can spread over two successive physical pages in the kernel mapper.
    8484 ******************************************************************************************/
     
    9494
    9595/*******************************************************************************************
    96  * This function allocates physical memory for a mapper descriptor, and inititalizes it
     96 * This function allocates physical memory for a mapper descriptor, and initializes it
    9797 * (refcount <= 0) / inode <= NULL).
    9898 * It must be executed by a thread running in the cluster containing the mapper.
     
    105105 * This function releases all physical pages allocated for the mapper.
    106106 * It synchronizes all dirty pages (i.e. update the file on disk) if required.
    107  * The mapper descriptor and the radix three themselve are released.
     107 * The mapper descriptor and the radix tree themselves are released.
    108108 * It must be executed by a thread running in the cluster containing the mapper.
    109109 *******************************************************************************************
     
    114114
    115115/*******************************************************************************************
    116  * This function moves all fragments covering a distributed user buffer between 
     116 * This function moves all fragments covering a distributed user buffer between
    117117 * a mapper (associated to a local inode), and the user buffer.
    118  * [See the fragment definition in the mapper.h file] 
     118 * [See the fragment definition in the mapper.h file]
    119119 * It must be executed by a thread running in the cluster containing the mapper.
    120120 * The lock protecting the mapper must have been taken in WRITE_MODE or READ_MODE
    121121 * by the caller thread, depending on the transfer direction.
    122122 * In case of write, the dirty bit is set for all pages written in the mapper.
    123  * The offset in the file descriptor is not modified by this function. 
     123 * The offset in the file descriptor is not modified by this function.
    124124 * Implementation note:
    125125 * For each fragment, this function makes ONE hal_remote_memcpy() when the fragment is
     
    155155
    156156/*******************************************************************************************
    157  * This function search a physical page descriptor from its index in mapper.
     157 * This function searches a physical page descriptor from its index in mapper.
    158158 * It must be executed by a thread running in the cluster containing the mapper.
    159159 * In case of miss, it takes the mapper lock in WRITE_MODE, loads the missing
    160  * page from device to the mapper, and releases the mapper lock.
     160 * page from device to the mapper, and releases the mapper lock.
    161161 *******************************************************************************************
    162162 * @ mapper     : local pointer on the mapper.
     
    174174 * @ mapper     : local pointer on the mapper.
    175175 * @ index      : page index in file.
    176  * @ page   : local pointer on the page descriptor in mapper. 
     176 * @ page   : local pointer on the page descriptor in mapper.
    177177 * @ returns 0 if success / return EINVAL if it cannot access the device.
    178178 ******************************************************************************************/
    179179error_t mapper_updt_page( mapper_t      * mapper,
    180180                          uint32_t        index,
    181                           struct page_s * page );       
    182              
     181                          struct page_s * page );
     182
    183183/*******************************************************************************************
    184184 * This function makes an I/O operation to move one page from mapper to FS.
     
    191191 * @ mapper     : local pointer on the mapper.
    192192 * @ index      : page index in file.
    193  * @ page   : local pointer on the page descriptor in mapper. 
     193 * @ page   : local pointer on the page descriptor in mapper.
    194194 * @ returns 0 if success / return EINVAL if it cannot access the device.
    195195 ******************************************************************************************/
    196196error_t mapper_sync_page( mapper_t      * mapper,
    197197                          uint32_t        index,
    198                           struct page_s * page );       
     198                          struct page_s * page );
    199199
    200200#endif /* _MAPPER_H_ */
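
The fragment-moving code in the mapper.c hunks reduces to a little page
arithmetic: a fragment touches one mapper page when its first and last bytes
share a page index, and exactly two successive pages otherwise. The standalone
sketch below restates that computation (DEMO_PAGE_SHIFT stands in for
CONFIG_PPM_PAGE_SHIFT). Note that it masks with (page size - 1); the line
"first_page_offset = min_file_offset & (1<<CONFIG_PPM_PAGE_SHIFT);" in the
mapper.c hunk looks like it was meant to use that mask.

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SHIFT 12                        /* assumption : 4K pages  */
    #define DEMO_PAGE_SIZE  (1U << DEMO_PAGE_SHIFT)
    #define DEMO_PAGE_MASK  (DEMO_PAGE_SIZE - 1)

    /* show how fragment [file_offset , file_offset + size - 1] maps on pages */
    static void demo_fragment_split( uint32_t file_offset , uint32_t size )
    {
        uint32_t min_file_offset = file_offset;            /* first byte in file */
        uint32_t max_file_offset = file_offset + size - 1; /* last byte in file  */

        uint32_t first_page_index  = min_file_offset >> DEMO_PAGE_SHIFT;
        uint32_t second_page_index = max_file_offset >> DEMO_PAGE_SHIFT;

        if( first_page_index == second_page_index )   /* only one page in mapper */
        {
            printf( "page %u : offset %u / size %u\n",
                    first_page_index , min_file_offset & DEMO_PAGE_MASK , size );
        }
        else                                          /* two successive pages    */
        {
            uint32_t first_page_offset = min_file_offset & DEMO_PAGE_MASK;
            uint32_t first_page_size   = DEMO_PAGE_SIZE - first_page_offset;

            printf( "page %u : offset %u / size %u , then page %u : size %u\n",
                    first_page_index , first_page_offset , first_page_size ,
                    second_page_index , size - first_page_size );
        }
    }

In the first case the move is a single hal_remote_memcpy(); in the second it
takes one copy per mapper page, which matches the implementation note quoted
in this header.
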
  • trunk/kernel/mm/page.c

    r7 r18  
    8989        // set dirty flag in page descriptor
    9090                page_set_flag( page , PG_DIRTY );
    91  
     91
    9292        // register page in PPM dirty list
    9393                list_add_first( &ppm->dirty_root , &page->list );
     
    9797    // unlock the PPM dirty_list
    9898        spinlock_unlock( &ppm->dirty_lock );
    99  
     99
    100100        return done;
    101101}
     
    128128
    129129/////////////////////
    130 void sync_all_pages() 
     130void sync_all_pages()
    131131{
    132132        page_t   * page;
     
    138138        spinlock_lock( &ppm->dirty_lock );
    139139
    140         while( !list_is_empty( &ppm->dirty_root ) ) 
     140        while( !list_is_empty( &ppm->dirty_root ) )
    141141        {
    142142                page = LIST_FIRST( &ppm->dirty_root ,  page_t , list );
     
    147147                mapper = page->mapper;
    148148        index  = page->index;
    149  
     149
    150150        // lock the page
    151151                page_lock( page );
     
    203203    // take the spinlock protecting the PG_LOCKED flag
    204204        spinlock_lock( &page->lock );
    205  
     205
    206206    // check the page waiting list
    207207        bool_t is_empty = xlist_is_empty( XPTR( local_cxy , &page->wait_root ) );
     
    210210    {
    211211        // get an extended pointer on the first waiting thread
    212         xptr_t root_xp   = XPTR( local_cxy , &page->wait_root ); 
     212        xptr_t root_xp   = XPTR( local_cxy , &page->wait_root );
    213213        xptr_t thread_xp = XLIST_FIRST_ELEMENT( root_xp , thread_t , wait_list );
    214214
     
    244244        void    * src_base;
    245245        void    * dst_base;
    246  
     246
    247247        if( dst->order != src->order )
    248248    {
     
    272272//        size , (uint32_t)base , (uint32_t)(&LOCAL_CLUSTER->kcm_tbl[0] ) );
    273273
    274         memset( base , 0 , size ); 
     274        memset( base , 0 , size );
    275275
    276276// kmem_print_kcm_table();
     
    282282{
    283283        printk("*** Page %d : base = %x / flags = %x / order = %d / count = %d\n",
    284                 page->index, 
     284                page->index,
    285285                ppm_page2base( page ),
    286                 page->flags, 
    287                 page->order, 
     286                page->flags,
     287                page->order,
    288288                page->refcount );
    289289}
  • trunk/kernel/mm/page.h

    r14 r18  
    3636
    3737struct mapper_s;
    38  
     38
    3939/*************************************************************************************
    4040 * This  defines the flags that can be attached to a physical page.
     
    4444#define PG_RESERVED         0x0002     // cannot be allocated by PPM
    4545#define PG_FREE             0x0004     // page can be allocated by PPM
    46 #define PG_INLOAD           0x0008     // on-going load from disk 
     46#define PG_INLOAD           0x0008     // on-going load from disk
    4747#define PG_IO_ERR           0x0010     // mapper signals a read/write access error
    4848#define PG_BUFFER           0x0020     // used in blockio.c
     
    6464    struct mapper_s * mapper;         /*! local pointer on associated mapper   (4)  */
    6565    uint32_t          index;          /*! page index in mapper                 (4)  */
    66  
     66
    6767        union                             /*!                                      (4)  */
    6868        {
     
    7272        };
    7373
    74         list_entry_t      list;           /*! for both dirty pages and free pages  (8)  */ 
     74        list_entry_t      list;           /*! for both dirty pages and free pages  (8)  */
    7575
    7676    xlist_entry_t     wait_root;      /*! root of list of waiting threads      (16) */
     
    9090 * This function sets one or several flags in page descriptor flags.
    9191 * @ page    : pointer to page descriptor.
    92  * @ value   : all non zero bits in value will be set. 
     92 * @ value   : all non zero bits in value will be set.
    9393 ************************************************************************************/
    9494inline void page_set_flag( page_t   * page,
     
    9898 * This function resets one or several flags in page descriptor flags.
    9999 * @ page    : pointer to page descriptor.
    100  * @ value   : all non zero bits in value will be cleared. 
     100 * @ value   : all non zero bits in value will be cleared.
    101101 ************************************************************************************/
    102102inline void page_clear_flag( page_t   * page,
     
    107107 * @ page    : pointer to page descriptor.
    108108 * @ value   : all non zero bits will be tested.
    109  * @ returns true if at least one non zero bit in value is set / false otherwise. 
     109 * @ returns true if at least one non zero bit in value is set / false otherwise.
    110110 ************************************************************************************/
    111111inline bool_t page_is_flag( page_t   * page,
     
    114114/*************************************************************************************
    115115 * This function synchronizes (i.e. updates the disk) all dirty pages in a cluster.
    116  * It scans the PPM dirty list, which should be empty when this operation is completed.
     116 * It scans the PPM dirty list, which should be empty when this operation is completed.
    117117 ************************************************************************************/
    118118void sync_all_pages();
     
    130130 * and remove the page from the dirty list in PPM.
    131131 * @ page     : pointer on page descriptor.
    132  * @ returns true if page was dirty / returns false if page was not dirty 
     132 * @ returns true if page was dirty / returns false if page was not dirty
    133133 ************************************************************************************/
    134134bool_t page_undo_dirty( page_t * page );
     
    143143
    144144/*************************************************************************************
    145  * This function resets all bytes in a given page to 0.
     145 * This function resets all bytes in a given page to 0.
    146146 * @ page     : pointer on page descriptor.
    147147 ************************************************************************************/
     
    151151 * This blocking function sets the PG_LOCKED flag on the page.
    152152 * It deschedules if the page has already been locked by another thread,
    153  * and returns only when the flag has been successfully set. 
     153 * and returns only when the flag has been successfully set.
    154154 * @ page     : pointer on page descriptor.
    155155 ************************************************************************************/
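
The three flag helpers declared above have conventional bit-mask semantics:
set ORs the one-bits of value into the flags word, clear ANDs them out, and
the test returns true when at least one of them is set. A minimal sketch
against a bare flags word, with PG_* values copied from the hunk above:

    #include <stdint.h>
    #include <stdbool.h>

    #define PG_RESERVED 0x0002     /* cannot be allocated by PPM   */
    #define PG_FREE     0x0004     /* page can be allocated by PPM */
    #define PG_INLOAD   0x0008     /* on-going load from disk      */

    static inline void demo_set_flag( uint32_t * flags , uint32_t value )
    {
        *flags |= value;              /* all non zero bits of value are set     */
    }

    static inline void demo_clear_flag( uint32_t * flags , uint32_t value )
    {
        *flags &= ~value;             /* all non zero bits of value are cleared */
    }

    static inline bool demo_is_flag( uint32_t flags , uint32_t value )
    {
        return (flags & value) != 0;  /* true if at least one tested bit is set */
    }
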
  • trunk/kernel/mm/ppm.c

    r14 r18  
    5656
    5757////////////////////////////////////////////
    58 inline page_t * ppm_base2page( void * base ) 
     58inline page_t * ppm_base2page( void * base )
    5959{
    6060        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     
    105105    // search the buddy page descriptor
    106106    // - merge with current page descriptor if found
    107     // - exit to release the current page descriptor if not found 
    108     current       = page , 
     107    // - exit to release the current page descriptor if not found
     108    current       = page ,
    109109    current_index = (uint32_t)(page - ppm->pages_tbl);
    110         for( current_order = page->order ; 
     110        for( current_order = page->order ;
    111111         current_order < CONFIG_PPM_MAX_ORDER ;
    112112         current_order++ )
     
    114114                buddy_index = current_index ^ (1 << current_order);
    115115                buddy       = pages_tbl + buddy_index;
    116    
     116
    117117                if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;
    118118
    119         // remove buddy from free list 
     119        // remove buddy from free list
    120120                list_unlink( &buddy->list );
    121121                ppm->free_pages_nr[current_order] --;
    122122        ppm->total_free_pages -= (1 << current_order);
    123    
     123
    124124        // merge buddy with current
    125125                buddy->order = 0;
    126126                current_index &= buddy_index;
    127127        }
    128  
     128
    129129    // update merged page descriptor order
    130130        current        = pages_tbl + current_index;
     
    162162    list_root_init( &ppm->dirty_root );
    163163
    164     // initialize pointer on page descriptors array 
     164    // initialize pointer on page descriptors array
    165165        ppm->pages_tbl = (page_t*)( pages_offset << CONFIG_PPM_PAGE_SHIFT );
    166166
     
    174174        uint32_t reserved_pages = pages_offset + pages_array;
    175175
    176         // set pages numbers 
     176        // set pages numbers
    177177        ppm->pages_nr      = pages_nr;
    178178    ppm->pages_offset  = reserved_pages;
     
    180180    // initialises all page descriptors in pages_tbl[]
    181181        for( i = 0 ; i < pages_nr ; i++ )
    182     { 
     182    {
    183183        page_init( &ppm->pages_tbl[i] );
    184184
     
    186186        // complete the initialisation when page is allocated [AG]
    187187        // ppm->pages_tbl[i].flags = 0;
    188     } 
    189 
    190     // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[]) 
     188    }
     189
     190    // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
    191191    // - release all other pages to populate the free lists
    192         for( i = 0 ; i < reserved_pages ; i++) 
     192        for( i = 0 ; i < reserved_pages ; i++)
    193193    {
    194194        page_set_flag( &ppm->pages_tbl[i] , PG_RESERVED );
     
    250250        return NULL;
    251251    }
    252  
    253     // update free-lists after removing a block 
     252
     253    // update free-lists after removing a block
    254254        ppm->total_free_pages -= (1 << current_order);
    255         ppm->free_pages_nr[current_order] --; 
     255        ppm->free_pages_nr[current_order] --;
    256256        current_size = (1 << current_order);
    257257
     
    262262                current_order --;
    263263                current_size >>= 1;
    264    
     264
    265265                remaining_block = block + current_size;
    266266                remaining_block->order = current_order;
     
    270270        ppm->total_free_pages += (1 << current_order);
    271271        }
    272  
     272
    273273    // update page descriptor
    274274    page_clear_flag( block , PG_FREE );
     
    278278    // release lock protecting free lists
    279279        spinlock_unlock( &ppm->free_lock );
    280  
     280
    281281    ppm_dmsg("\n[INFO] %s : base = %x / order = %d\n",
    282282             __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
     
    294294{
    295295        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    296  
     296
    297297    // get lock protecting free_pages[] array
    298298        spinlock_lock( &ppm->free_lock );
    299299
    300         ppm_free_pages_nolock( page ); 
     300        ppm_free_pages_nolock( page );
    301301
    302302    // release lock protecting free_pages[] array
     
    316316
    317317        printk("\n***  PPM state in cluster %x %s : pages = %d / offset = %d / free = %d ***\n",
    318                local_cxy , string , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages ); 
    319        
     318               local_cxy , string , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages );
     319
    320320        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    321321        {
    322322                printk("- order = %d / free_pages = %d  [",
    323323               order , ppm->free_pages_nr[order] );
    324                
     324
    325325                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
    326326                {
     
    328328                        printk("%d," , page - ppm->pages_tbl );
    329329                }
    330    
     330
    331331                printk("]\n", NULL );
    332332        }
     
    336336
    337337}  // end ppm_print()
    338  
     338
    339339////////////////////////////////////
    340340void ppm_assert_order( ppm_t * ppm )
     
    343343        list_entry_t * iter;
    344344        page_t       * page;
    345        
     345
    346346        for(order=0; order < CONFIG_PPM_MAX_ORDER; order++)
    347347        {
    348348                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
    349                
     349
    350350                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
    351351                {
  • trunk/kernel/mm/ppm.h

    r7 r18  
    11/*
    22 * ppm.h - Per-cluster Physical Pages Manager Interface
    3  * 
     3 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    55 *          Alain Greiner    (2016)
     
    3939 * The segments kcode and kdata are mapped in the first "offset" pages.
    4040 * The physical page descriptors array is implemented just after this offset zone.
    41  * The main service provided by the PMM is the dynamic allocation of physical pages. 
    42  * This low-level allocator implements the buddy algorithm: an allocated block is 
     41 * The main service provided by the PMM is the dynamic allocation of physical pages.
     42 * This low-level allocator implements the buddy algorithm: an allocated block is
    4343 * an integer number n of 4 Kbytes pages, and n (called order) is a power of 2.
    4444 ****************************************************************************************/
     
    6262 * This function initializes a PPM (Physical Pages Manager) in a cluster.
    6363 * The physical memory base address in all clusters is zero.
    64  * The physical memory size is NOT constrained to be smaller than 4 Gbytes. 
     64 * The physical memory size is NOT constrained to be smaller than 4 Gbytes.
    6565 *****************************************************************************************
    6666 * @ ppm          : pointer on physical pages manager.
     
    6868 * @ pages_offset : number of pages already allocated in this physical memory.
    6969 ****************************************************************************************/
    70 void ppm_init( ppm_t    * ppm, 
     70void ppm_init( ppm_t    * ppm,
    7171               uint32_t   pages_nr,
    7272                   uint32_t   pages_offset );
     
    7575 * This is the low-level physical pages allocation function.
    7676 * It allocates N contiguous physical pages. N is a power of 2.
    77  * In normal use, you don't need to call it directly, as the recommanded way to get
     77 * In normal use, you don't need to call it directly, as the recommended way to get
    7878 * physical pages is to call the generic allocator defined in kmem.h.
    7979 *****************************************************************************************
     
    8585/*****************************************************************************************
    8686 * This is the low-level physical pages release function.
    87  * In normal use, you do not need to call it directly, as the recommanded way to free
     87 * In normal use, you do not need to call it directly, as the recommended way to free
    8888 * physical pages is to call the generic allocator defined in kmem.h.
    8989 *****************************************************************************************
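
The ppm.c hunks contain the two index identities that make the buddy algorithm
cheap: the buddy of a block of order k starting at page index i sits at
i ^ (1 << k), and merging the pair yields the order k+1 block at
i & buddy_index (the lower of the two indices), exactly as in
ppm_free_pages_nolock() above. A tiny standalone check:

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t current_index = 44;   /* hypothetical page index of a free block */
        uint32_t current_order = 2;    /* block of 2^2 = 4 contiguous pages       */

        uint32_t buddy_index = current_index ^ (1U << current_order);   /* 40     */
        uint32_t merged      = current_index & buddy_index;             /* 40     */

        printf( "block %u / order %u : buddy at %u , merged block at %u / order %u\n",
                current_index , current_order , buddy_index , merged ,
                current_order + 1 );
        return 0;
    }

The allocation path in ppm_alloc_pages() runs the same identity backwards:
when a larger block is split, its upper half at block + current_size becomes
the free remaining_block of the next lower order.
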
  • trunk/kernel/mm/vmm.h

    r1 r18  
    55 *           Mohamed Lamine Karaoui (2015)
    66 *           Alain Greiner (2016)
    7  * 
     7 *
    88 * Copyright (c) UPMC Sorbonne Universites
    99 *
     
    5050 * The slot index can be computed from the slot base address, and conversely.
    5151 * All allocation / release operations are registered in the stack_bitmap, that completely
    52  * defines the STACK zone state.
     52 * defines the STACK zone state.
    5353 * In this implementation, the max number of slots is 32.
    5454 ********************************************************************************************/
     
    6565 * This structure defines the MMAP allocator used by the VMM to dynamically allocate
    6666 * MMAP vsegs requested or released by a user process.
    67  * This allocator sould be only used in the reference cluster.
    68  * - allocation policy : all allocated vsegs occupy an integer number of pages that is 
     67 * This allocator should be only used in the reference cluster.
     68 * - allocation policy : all allocated vsegs occupy an integer number of pages that is
    6969 *   power of 2, and are aligned on a page boundary. The requested number of pages is
    70  *   rounded if reqired. The first_free_vpn variable defines completely the MMAP zone state.
     70 *   rounded if required. The first_free_vpn variable defines completely the MMAP zone state.
    7171 *   It is never decremented, as the released vsegs are simply registered in a zombi_list.
    72  *   The relevant zombi_list is checked first for each allocation request. 
     72 *   The relevant zombi_list is checked first for each allocation request.
    7373 * - release policy : a released MMAP vseg is registered in an array of zombi_lists.
    7474 *   This array is indexed by ln(number of pages), and each entry contains the root of
     
    9595 *    and in the associated radix-tree.
    9696 * 2) It allocates virtual memory space for the STACKS and MMAP vsegs,
    97  *    using dedicated allocators. 
     97 *    using dedicated allocators.
    9898 * 3) It contains the local copy of the generic page table descriptor.
    9999 ********************************************************************************************/
     
    136136typedef struct mmap_attr_s
    137137{
    138         void     * addr;            /*! requested virtual address (unused : should be NULL)     */ 
     138        void     * addr;            /*! requested virtual address (unused : should be NULL)     */
    139139        uint32_t   length;          /*! requested vseg size (bytes)                             */
    140140        uint32_t   prot;            /*! access modes                                            */
     
    159159/*********************************************************************************************
    160160 * This function removes all vsegs registered in a virtual memory manager,
    161  * and releases the memory allocated to the local generic page table. 
     161 * and releases the memory allocated to the local generic page table.
    162162 *********************************************************************************************
    163163 * @ vmm   : pointer on process descriptor.
     
    166166
    167167/*********************************************************************************************
    168  * This function scan the list of vsegs registered in the VMM of a given process descriptor
     168 * This function scans the list of vsegs registered in the VMM of a given process descriptor
    169169 * to check if a given virtual region (defined by a base and size) overlaps an existing vseg.
    170170 *********************************************************************************************
     
    179179
    180180/*********************************************************************************************
    181  * This function allocates memory for a vseg descriptor, initialises it, and registers it
     181 * This function allocates memory for a vseg descriptor, initialises it, and registers it
    182182 * in the VMM of the process. It checks the collision with pre-existing vsegs in VMM.
    183183 * For STACK and MMAP types vseg, it does not use the base argument, but uses the VMM STACK
    184184 * and MMAP specific allocators to get a base address in virtual space.
    185  * To comply with the "on-demand" paging policy, this function does NOT mofify the
     185 * To comply with the "on-demand" paging policy, this function does NOT modify the
    186186 * page table, and does not allocate physical memory for vseg data.
    187187 *********************************************************************************************
     
    193193 ********************************************************************************************/
    194194vseg_t * vmm_create_vseg( struct process_s * process,
    195                           intptr_t           base, 
    196                               intptr_t           size, 
     195                          intptr_t           base,
     196                              intptr_t           size,
    197197                              uint32_t           type );
    198198
    199199/*********************************************************************************************
    200  * Ths function removes a vseg identified by its pointer from the VMM of the calling process.
     200 * This function removes a vseg identified by its pointer from the VMM of the calling process.
    201201 * - If the vseg has not the STACK or MMAP type, it is removed from the vsegs list,
    202202 *   and the physical memory allocated to vseg descriptor is released to KMEM.
     
    213213
    214214/*********************************************************************************************
    215  * This function allocates physical memory from the local cluster to map all PTEs 
     215 * This function allocates physical memory from the local cluster to map all PTEs
    216216 * of a "kernel" vseg (type KCODE , KDATA, or KDEV) in the page table of process_zero.
    217  * It should not be used for other vseg types, because "user" vsegs use the 
     217 * It should not be used for other vseg types, because "user" vsegs use the
    218218 * "on-demand-paging" policy.
    219219 *********************************************************************************************
     
    226226
    227227/*********************************************************************************************
    228  * This function unmap all PTEs of a given vseg, in the generic page table asociated
     228 * This function unmaps all PTEs of a given vseg, in the generic page table associated
    229229 * to a given process descriptor, and releases the corresponding physical memory.
    230230 * It can be used for any type of vseg.
     
    237237
    238238/*********************************************************************************************
    239  * This function remove a given region (defined by a base address and a size) from
     239 * This function removes a given region (defined by a base address and a size) from
    240240 * the VMM of a given process descriptor. This can modify several vsegs:
    241241 * (a) if the region is not entirely mapped in an existing vseg, it's an error.
     
    254254
    255255/*********************************************************************************************
    256  * This function search if a given virtual address is contained in a vseg registered in
     256 * This function searches if a given virtual address is contained in a vseg registered in
    257257 * the local process VMM and returns the vseg pointer if success.
    258258 *********************************************************************************************
     
    265265
    266266/*********************************************************************************************
    267  * This function is called by the architecture specific exception handler when a 
    268  * page fault has been detected in a given cluster. 
     267 * This function is called by the architecture specific exception handler when a
     268 * page fault has been detected in a given cluster.
    269269 * If the local cluster is not the reference cluster, it sends an RPC_VMM_GET_PTE
    270270 * to the reference cluster to get the missing PTE attributes and PPN, and updates
     
    279279error_t vmm_handle_page_fault( struct process_s * process,
    280280                               vseg_t           * vseg,
    281                                vpn_t              vpn ); 
     281                               vpn_t              vpn );
    282282
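
Putting the comment's protocol in code form: if we are the reference cluster the PTE can be resolved locally by the function documented just below (its prototype is elided in this hunk, so it appears here as sketch_get_pte); otherwise we go through RPC_VMM_GET_PTE. The RPC client name, the GET_CXY()/ref_xp access, and all argument orders are assumptions:

    // sketch: resolve a page fault locally or via the reference cluster
    error_t sketch_page_fault( struct process_s * process , vpn_t vpn )
    {
        uint32_t attr;
        ppn_t    ppn;
        error_t  error;
        cxy_t    ref_cxy = GET_CXY( process->ref_xp );   // assumed field / macro

        if( ref_cxy == local_cxy )      // we are the reference: resolve locally
            error = sketch_get_pte( process , vpn , &attr , &ppn );
        else                            // remote: RPC_VMM_GET_PTE (client name assumed)
            sketch_rpc_vmm_get_pte( ref_cxy , process , vpn , &attr , &ppn , &error );

        if( error ) return error;
        return sketch_gpt_map( vpn , ppn , attr );       // update the local page table
    }
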
    283283/*********************************************************************************************
    284284 * This function returns in the "attr" and "ppn" arguments the PTE associated to a given
    285  * VPN for a given process. This function must be called on the reference cluster. 
     285 * VPN for a given process. This function must be called on the reference cluster.
    286286 * To get the PTE from another cluster, use the RPC_VMM_GET_PTE.
    287287 * The vseg containing the searched VPN should be registered in the reference VMM.
    288288 * If the PTE in the reference page table is unmapped, this function allocates the missing
    289289 * physical page from the target cluster defined by the vseg type, and updates the reference
    290  * page table. It can call a RPC_PMEM_GET_PAGES to get the missing physical page, 
     290 * page table. It can call a RPC_PMEM_GET_PAGES to get the missing physical page,
    291291 * if the target cluster is not the reference cluster.
    292292 *********************************************************************************************
     
    303303
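
On the reference side, the comment above implies roughly this control flow; every sketch_-prefixed helper is hypothetical, and the selection of the target cluster from the vseg type is summarized rather than implemented:

    // sketch: reference-cluster PTE miss handling
    // the vseg covering vpn must already be registered in the reference VMM;
    // an unmapped PTE gets a frame from the cluster selected by the vseg type
    // (through RPC_PMEM_GET_PAGES when that cluster is remote), then is mapped
    error_t sketch_get_pte( struct process_s * process , vpn_t vpn ,
                            uint32_t * attr , ppn_t * ppn )
    {
        if( sketch_gpt_get_pte( vpn , attr , ppn ) == 0 ) return 0;  // already mapped

        cxy_t target = sketch_target_cxy( process , vpn );  // from the vseg type
        if( target == local_cxy ) *ppn = sketch_local_alloc();
        else                      *ppn = sketch_rpc_pmem_get_pages( target );

        *attr = sketch_attr_from_vseg( process , vpn );     // vseg flags -> PTE attributes
        return sketch_gpt_map( vpn , *ppn , *attr );        // update the reference table
    }
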
    304304/*********************************************************************************************
    305  * This function makes the virtual to physical address translation, using the calling 
    306  * process page table. It uses identity mapping if required by the ident flag. 
     305 * This function performs the virtual-to-physical address translation, using the calling
     306 * process page table. It uses identity mapping if required by the ident flag.
    307307 * This address translation is required to configure the devices
    308  * that have a DMA capability, or to implement the software L2/L3 cache cohérence, 
     308 * that have a DMA capability, or to implement the software L2/L3 cache coherence,
    309309 * using the MMC device synchronisation primitives.
    310310 * WARNING : the <ident> value must be defined by the CONFIG_KERNEL_IDENT parameter.
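
The ident flag short-circuits the table walk, which is what makes this usable for DMA setup and software cache coherence. A sketch, assuming a paddr_t type and a hypothetical table-walk helper; per the WARNING above, the ident value passed must match CONFIG_KERNEL_IDENT:

    // sketch: identity mapping versus page-table walk
    error_t sketch_v2p( intptr_t vaddr , paddr_t * paddr , uint32_t ident )
    {
        if( ident )                    // identity mapping: pa == va, no walk needed
        {
            *paddr = (paddr_t)vaddr;
            return 0;
        }
        return sketch_gpt_walk( vaddr , paddr );   // hypothetical table-walk helper
    }
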
     
    333333/*********************************************************************************************
    334334 ********************************************************************************************/
    335 int sys_sbrk( uint32_t current_heap_ptr, 
     335int sys_sbrk( uint32_t current_heap_ptr,
    336336              uint32_t size );
    337337
    338338/*********************************************************************************************
    339339 ********************************************************************************************/
    340 error_t vmm_sbrk( vmm_t   * vmm, 
     340error_t vmm_sbrk( vmm_t   * vmm,
    341341                  uint32_t  current,
    342342                  uint32_t  size );
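
The comment blocks for sys_sbrk() and vmm_sbrk() are empty in this revision, so the sketch below only illustrates the conventional sbrk contract (move the break inside the heap vseg and let on-demand paging populate the new range); the heap-vseg accessor is hypothetical:

    // sketch: conventional sbrk semantics, under the assumptions stated above
    error_t sketch_sbrk( vmm_t * vmm , uint32_t current , uint32_t size )
    {
        vseg_t * heap = sketch_heap_vseg( vmm );        // hypothetical accessor
        if( (current + size) > (uint32_t)heap->max )    // max is excluded (see vseg.h)
            return ENOMEM;                              // break would leave the vseg
        return 0;   // nothing to map: the new pages fault in on first access
    }
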
     
    345345 ********************************************************************************************/
    346346error_t vmm_madvise_migrate( vmm_t    * vmm,
    347                              uint32_t   start, 
     347                             uint32_t   start,
    348348                             uint32_t   len );
    349349
     
    361361
    362362/*********************************************************************************************
    363  * Hypothesis: the region is shared-anon, mapper list is rdlocked, page is locked 
     363 * Hypothesis: the region is shared-anon, mapper list is rdlocked, page is locked
    364364 ********************************************************************************************/
    365365error_t vmm_broadcast_inval( vseg_t * region,
     
    368368
    369369/*********************************************************************************************
    370  * Hypothesis: the region is shared-anon, mapper list is rdlocked, page is locked 
     370 * Hypothesis: the region is shared-anon, mapper list is rdlocked, page is locked
    371371 ********************************************************************************************/
    372372error_t vmm_migrate_shared_page_seq( vseg_t * region,
  • trunk/kernel/mm/vseg.c

    r1 r18  
    4343
    4444////////////////////////////////////////////////////////////////////////////////////////
    45 //   global variables for display / must be consistant with enum in "vseg.h"
     45//   global variables for display / must be consistent with enum in "vseg.h"
    4646////////////////////////////////////////////////////////////////////////////////////////
    4747
     
    8585///////////////////////////////////
    8686void vseg_init( vseg_t      * vseg,
    87                     intptr_t      base, 
     87                    intptr_t      base,
    8888                intptr_t      size,
    8989                vpn_t         vpn_base,
     
    105105
    106106    // set vseg flags depending on type
    107         if     ( type == VSEG_TYPE_CODE ) 
    108     {
    109         vseg->flags = VSEG_USER    |
    110                       VSEG_EXEC    | 
     107        if     ( type == VSEG_TYPE_CODE )
     108    {
     109        vseg->flags = VSEG_USER    |
     110                      VSEG_EXEC    |
    111111                      VSEG_CACHE   |
    112112                      VSEG_PRIVATE ;
     
    117117                      VSEG_WRITE   |
    118118                      VSEG_CACHE   |
    119                       VSEG_PRIVATE ; 
    120     }
    121     else if( type == VSEG_TYPE_DATA ) 
    122     {
    123         vseg->flags = VSEG_USER    |
    124                       VSEG_WRITE   |
    125                       VSEG_CACHE   | 
     119                      VSEG_PRIVATE ;
     120    }
     121    else if( type == VSEG_TYPE_DATA )
     122    {
     123        vseg->flags = VSEG_USER    |
     124                      VSEG_WRITE   |
     125                      VSEG_CACHE   |
    126126                      VSEG_DISTRIB ;
    127127    }
    128     else if( type == VSEG_TYPE_HEAP ) 
     128    else if( type == VSEG_TYPE_HEAP )
    129129    {
    130130        vseg->flags = VSEG_USER    |
     
    133133                      VSEG_DISTRIB ;
    134134    }
    135     else if( type == VSEG_TYPE_REMOTE ) 
    136     {
    137         vseg->flags = VSEG_USER    |
    138                       VSEG_WRITE   | 
    139                       VSEG_CACHE   ; 
    140     }
    141     else if( type == VSEG_TYPE_ANON ) 
     135    else if( type == VSEG_TYPE_REMOTE )
     136    {
     137        vseg->flags = VSEG_USER    |
     138                      VSEG_WRITE   |
     139                      VSEG_CACHE   ;
     140    }
     141    else if( type == VSEG_TYPE_ANON )
    142142    {
    143143        vseg->flags = VSEG_USER    |
     
    146146                      VSEG_DISTRIB ;
    147147    }
    148     else if( type == VSEG_TYPE_FILE ) 
     148    else if( type == VSEG_TYPE_FILE )
    149149    {
    150150        vseg->flags = VSEG_USER    |
     
    152152                      VSEG_CACHE   ;
    153153    }
    154     else if( type == VSEG_TYPE_KCODE ) 
     154    else if( type == VSEG_TYPE_KCODE )
    155155    {
    156156        vseg->flags = VSEG_EXEC    |
     
    158158                      VSEG_PRIVATE ;
    159159    }
    160     else if( type == VSEG_TYPE_KDATA ) 
     160    else if( type == VSEG_TYPE_KDATA )
    161161    {
    162162        vseg->flags = VSEG_WRITE   |
     
    164164                      VSEG_PRIVATE ;
    165165    }
    166     else 
     166    else
    167167    {
    168168            printk("\n[PANIC] in %s : illegal vseg type\n", __FUNCTION__);
    169169        hal_core_sleep();
    170     } 
     170    }
    171171}  // end vseg_init()
    172172
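
A design note on the ladder above: the type-to-flags dispatch is a natural switch. The sketch below covers only the cases whose flag sets are fully visible in this hunk (CODE, DATA, REMOTE, and the panic default); the other types are elided here, so they are deliberately omitted:

    // sketch: vseg_init()'s flag dispatch as a switch (visible cases only)
    static uint32_t sketch_flags_from_type( uint32_t type )
    {
        switch( type )
        {
        case VSEG_TYPE_CODE:   return VSEG_USER | VSEG_EXEC  | VSEG_CACHE | VSEG_PRIVATE;
        case VSEG_TYPE_DATA:   return VSEG_USER | VSEG_WRITE | VSEG_CACHE | VSEG_DISTRIB;
        case VSEG_TYPE_REMOTE: return VSEG_USER | VSEG_WRITE | VSEG_CACHE;
        default:               // covers the types elided in this hunk as well
            printk("\n[PANIC] in %s : illegal vseg type\n", __FUNCTION__);
            hal_core_sleep();
            return 0;
        }
    }
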
  • trunk/kernel/mm/vseg.h

    r16 r18  
    5757
    5858/**********************************************************************************************
    59  * These masks define the vseg generic (hardware independant) flags.
     59 * These masks define the vseg generic (hardware independent) flags.
    6060 *********************************************************************************************/
    6161
     
    7979        intptr_t          max;          /*! segment max virtual address (excluded)               */
    8080        vpn_t             vpn_base;     /*! first page of vseg                                   */
    81         vpn_t             vpn_size;     /*! numbre of pages occupied                             */
     81        vpn_t             vpn_size;     /*! number of pages occupied                             */
    8282        uint32_t          flags;        /*! vseg attributes                                      */
    8383        xptr_t            mapper;       /*! extended pointer on associated mapper                */
     
    119119 *********************************************************************************************/
    120120void vseg_init( vseg_t      * vseg,
    121                 intptr_t      base, 
    122                     intptr_t      size, 
     121                intptr_t      base,
     122                    intptr_t      size,
    123123                vpn_t         vpn_base,
    124124                vpn_t         vpn_size,
     
    139139
    140140/**********************************************************************************************
    141  * This function add a vseg descriptor in the set of vsegs controled by a given VMM,
    142  * and update the vmm field in the vseg descriptor.
     141 * This function adds a vseg descriptor in the set of vsegs controlled by a given VMM,
     142 * and updates the vmm field in the vseg descriptor.
    143143 * The lock protecting the vsegs list in VMM must be taken by the caller.
    144144 **********************************************************************************************
     
    147147 * @ returns 0 on success / returns ENOMEM on failure.
    148148 *********************************************************************************************/
    149 error_t vseg_attach( struct vmm_s  * vmm, 
     149error_t vseg_attach( struct vmm_s  * vmm,
    150150                     vseg_t        * vseg );
    151151
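
Both vseg_attach() above and vseg_detach() below leave locking to the caller, so a typical call site brackets them with the lock protecting the VMM's vsegs list. The lock field name and the rwlock API are assumptions:

    // sketch: honoring the stated locking contract around vseg_attach()
    error_t sketch_register_vseg( struct vmm_s * vmm , vseg_t * vseg )
    {
        error_t error;
        rwlock_wr_lock( &vmm->vsegs_lock );     // assumed lock guarding the list
        error = vseg_attach( vmm , vseg );
        rwlock_wr_unlock( &vmm->vsegs_lock );
        return error;
    }
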
    152152/**********************************************************************************************
    153  * This function removes a vseg descriptor from the set of vsegs controled by a given VMM,
    154  * and update the vmm field in the vseg descriptor. No memory is released.
     153 * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,
     154 * and updates the vmm field in the vseg descriptor. No memory is released.
    155155 * The lock protecting the vsegs list in VMM must be taken by the caller.
    156156 **********************************************************************************************
     
    158158 * @ vseg      : pointer on the vseg descriptor
    159159 *********************************************************************************************/
    160 void vseg_detach( struct vmm_s  * vmm, 
     160void vseg_detach( struct vmm_s  * vmm,
    161161                  vseg_t        * vseg );
    162162