Changeset 7 for trunk/kernel/mm


Timestamp: Apr 26, 2017, 2:15:50 PM
Author:    alain
Message:   Various bugs.
Location:  trunk/kernel/mm
Files:     8 edited

  • trunk/kernel/mm/kcm.c

    r1 r7  
    3333#include <page.h>
    3434#include <cluster.h>
     35#include <kmem.h>
    3536#include <kcm.h>
    3637
    37 /////////////////////////////////////////////////////////////////////////////////////
    38 // Extern global variable (defined in kmem.c file)
    39 /////////////////////////////////////////////////////////////////////////////////////
    40 
    41 extern uint32_t kmem_size_tbl[KMEM_TYPES_NR];
    42 
    43 
    44 /////////////////////////////////////////////////////////////////////////////////////
    45 // This function allocates one page from local PPM.
    46 /////////////////////////////////////////////////////////////////////////////////////
    47 static page_t * kcm_page_alloc( kcm_t * kcm )
    48 {
    49         page_t    * page;
    50     kmem_req_t  req;
    51 
    52     req.type  = KMEM_PAGE;
    53     req.size  = 0;
    54     req.flags = AF_KERNEL;
    55     page = kmem_alloc( &req );
    56  
    57         if( page == NULL )
    58         {
    59                 printk("\n[ERROR] in %s failed to allocate page in cluster %d\n",
    60                __FUNCTION__ , local_cxy );
    61         }
    62 
    63         return page;
    64 }
    65 
    6638//////////////////////////////////////////////////////////////////////////////////////
    67 // This static function returns pointer on allocated block from an active page.
     39// This static function returns pointer on an allocated block from an active page.
    6840// It returns NULL if no block available in selected page.
    6941// It changes the page status if required.
    7042//////////////////////////////////////////////////////////////////////////////////////
    71 static void * get_block( kcm_t      * kcm,
    72                          kcm_page_t * page )
    73 {
    74         int32_t index = bitmap_ffs( page->bitmap , sizeof(page->bitmap) );
    75 
    76     // the page should be active
    77     if( page->active == 0 )
    78     {
    79         printk("\n[PANIC] in %s : kcm page should be active\n", __FUNCTION__ );
    80         hal_core_sleep();
    81     }
    82 
    83     // the page should not be full
    84     if( index == -1 )
    85     {
    86         printk("\n[PANIC] in %s : kcm page should be active\n", __FUNCTION__ );
    87         hal_core_sleep();
    88     }
     43// @ kcm   : pointer on kcm allocator.
     44// @ page  : pointer on active kcm page to use.
     45/////////////////////////////////////////////////////////////////////////////////////
     46static void * kcm_get_block( kcm_t      * kcm,
     47                             kcm_page_t * page )
     48{
     49    assert( page->active , __FUNCTION__ , "kcm page should be active" );
     50
     51    // get first block available
     52        int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr );
     53
     54    assert( (index != -1) , __FUNCTION__ , "kcm page should not be full" );
    8955 
     56    // allocate block
    9057        bitmap_clear( page->bitmap , index );
     58
     59    // increase page refcount
    9160        page->refcount ++;
    9261
    93     // change the page to busy if last block in page
     62    // change the page to busy if no more free blocks in page
    9463    if( page->refcount >= kcm->blocks_nr )
    9564    {
     
    10372    }
    10473
    105         return (page->blk_tbl + index * kcm->block_size );
    106 }
     74        return (page->base + index * kcm->block_size );
     75
     76}  // kcm_get_block()
    10777
    10878/////////////////////////////////////////////////////////////////////////////////////
     
    11080// It changes the page status if required.
    11181/////////////////////////////////////////////////////////////////////////////////////
    112 static void put_block ( kcm_t * kcm,
    113                         void  * ptr )
     82// @ kcm   : pointer on kcm allocator.
     83// @ ptr   : pointer on block to be released.
     84/////////////////////////////////////////////////////////////////////////////////////
     85static void kcm_put_block ( kcm_t * kcm,
     86                            void  * ptr )
    11487{
    11588        kcm_page_t * page;
     
    11790 
    11891        page = (kcm_page_t*)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
    119         index = ((uint8_t*)ptr - page->blk_tbl) / kcm->block_size;
     92        index = ((uint8_t*)ptr - page->base) / kcm->block_size;
    12093 
    12194        bitmap_set( page->bitmap , index );
     
    144117                kcm->free_pages_nr ++;
    145118        }
    146 }
    147 
    148 /////////////////////////////////////////////////////////////////////////////////////
    149 // This static function tries to allocate one page from PPM. It initializes
     119}  // kcm_put_block()
     120
     121/////////////////////////////////////////////////////////////////////////////////////
     122// This static function allocates one page from PPM. It initializes
    150123// the KCM-page descriptor, and introduces the new page into freelist.
    151124/////////////////////////////////////////////////////////////////////////////////////
     
    154127        page_t     * page;
    155128        kcm_page_t * ptr;
     129    kmem_req_t   req;
    156130
    157131    // get one page from local PPM
    158         page = kcm_page_alloc( kcm );
    159         if( page == NULL ) return ENOMEM;
    160  
    161     // get physical page base address
     132    req.type  = KMEM_PAGE;
     133    req.size  = 0;
     134    req.flags = AF_KERNEL;
     135    page = kmem_alloc( &req );
     136 
     137        if( page == NULL )
     138        {
     139                printk("\n[ERROR] in %s : failed to allocate page in cluster %d\n",
     140               __FUNCTION__ , local_cxy );
     141        return ENOMEM;
     142        }
     143
     144    // get page base address
    162145        ptr = ppm_page2base( page );
    163146
     
    168151        ptr->active        = 0;
    169152        ptr->refcount      = 0;
    170         ptr->blk_tbl       = (uint8_t*)ptr + kcm->block_size;
     153        ptr->base          = (uint8_t*)ptr + kcm->block_size;
    171154        ptr->kcm           = kcm;
    172155        ptr->page          = page;
     
    177160 
    178161        return 0;
    179 }
     162
     163}  // freelist_populate()
    180164
    181165/////////////////////////////////////////////////////////////////////////////////////
     
    185169static kcm_page_t * freelist_get( kcm_t * kcm )
    186170{
    187         error_t      err;
     171        error_t      error;
    188172        kcm_page_t * page;
    189173
     
    191175        if( kcm->free_pages_nr == 0 )
    192176        {
    193         err = freelist_populate( kcm );
    194         if( err ) return NULL;
     177        error = freelist_populate( kcm );
     178        if( error       ) return NULL;
    195179        }
    196180
     
    199183        list_unlink( &page->list );
    200184        kcm->free_pages_nr --;
    201     page->active = 1;
    202 
    203     list_add_first( &kcm->active_root , &page->list);
    204         kcm->active_pages_nr ++;
    205     page->active = 1;
    206185
    207186        return page;
    208 }
    209 
    210 
    211 ////////////////////////////////////
    212 error_t kcm_init( kcm_t       * kcm,
    213                           uint32_t      type )
     187
     188} // freelist_get()
     189
     190
     191//////////////////////////////
     192void kcm_init( kcm_t    * kcm,
     193                   uint32_t   type )
    214194{
    215195        uint32_t     blocks_nr;
    216196        uint32_t     block_size;
    217197        uint32_t     remaining;
    218         kcm_page_t * page;
    219 
     198
     199    // initialize lock
    220200        spinlock_init( &kcm->lock );
    221201
     
    231211        list_root_init( &kcm->active_root );
    232212
    233     // initialize block size and number of blocks per page,
    234     // using the global kmem_size_tbl[] array
    235         block_size      = ARROUND_UP( kmem_size_tbl[type] , 64 );
     213    // initialize block size and number of blocks per page
     214        block_size      = ARROUND_UP( kmem_type_size( type ) , 64 );
    236215        blocks_nr       = CONFIG_PPM_PAGE_SIZE / block_size;
    237216        remaining       = CONFIG_PPM_PAGE_SIZE % block_size;
     
    240219        kcm->blocks_nr  = blocks_nr;
    241220        kcm->block_size = block_size;
    242 
    243     // get one page from free_list
    244         page = freelist_get( kcm );
    245         if( page == NULL ) return ENOMEM;
    246221     
    247     // register page in active list
    248         list_add_first( &kcm->active_root , &page->list );
    249         page->active = 1;
    250         kcm->active_pages_nr ++;
    251 
    252         return 0;
    253 }
     222    kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
     223             __FUNCTION__ , kmem_type_str( type ) , block_size , blocks_nr );
     224
     225}  // kcm_init()
    254226
    255227///////////////////////////////
     
    292264    spinlock_unlock( &kcm->lock );
    293265
    294    
    295 }
     266}  // kcm_destroy()
    296267
    297268///////////////////////////////
     
    304275        spinlock_lock( &kcm->lock );
    305276   
    306     // get block
    307     if( list_is_empty( &kcm->active_root ) )  // no active page
     277    // get an active page
     278    if( list_is_empty( &kcm->active_root ) )  // no active page => get one
    308279    {
     280        kcm_dmsg("\n[INFO] %s : enters for type %s but no active page => get one\n",
     281                 __FUNCTION__ , kmem_type_str( kcm->type ) );
     282
     283        // get a page from free list
     284                page = freelist_get( kcm );
      285            if( page == NULL ) { spinlock_unlock( &kcm->lock ); return NULL; }
     286
     287        // insert page in active list
     288        list_add_first( &kcm->active_root , &page->list );
     289            kcm->active_pages_nr ++;
     290        page->active = 1;
     291    }
     292    else                     // get first page from active list
     293    {
     294        kcm_dmsg("\n[INFO] %s : enters for type %s with an active page\n",
     295                 __FUNCTION__ , kmem_type_str( kcm->type ) );
     296
     297        // get page pointer from active list
    309298                page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
    310         ptr  = get_block( kcm , page );
    311299    }
    312     else                                      // active page cannot be full
    313     {
    314         page = LIST_FIRST( &kcm->active_root , kcm_page_t , list );
    315         ptr  = get_block( kcm , page );
    316     }
     300
     301    // get a block from selected active page
     302    // cannot fail, as an active page cannot be full...
     303    ptr  = kcm_get_block( kcm , page );
    317304
    318305    // release lock 
    319306        spinlock_unlock(&kcm->lock);
    320307
     308    kcm_dmsg("\n[INFO] %s : allocated one block of type %s / ptr = %x\n",
     309             __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr );
     310
    321311        return ptr;
    322 }
     312
     313}  // kcm_alloc()
    323314
    324315///////////////////////////
     
    337328
    338329    // release block
    339         put_block( kcm , ptr );
     330        kcm_put_block( kcm , ptr );
    340331
    341332    // release lock 
     
    346337void kcm_print (kcm_t * kcm)
    347338{
    348         printk("*** KCM type = %d / free_pages = %d / busy_pages = %d / active_pages = %d\n",
    349            kcm->type ,
     339        printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",
     340           kmem_type_str( kcm->type ) ,
    350341           kcm->free_pages_nr ,
    351342           kcm->busy_pages_nr ,
  • trunk/kernel/mm/kcm.h

    r1 r7  
    3939 * contain one single object.
    4040 * The various KCM allocators themselves are not statically allocated in the cluster
    41  * manager, but are dynamically allocated when required, using the specific KCM
     41 * manager, but are dynamically allocated when required, using the embedded KCM
    4242 * allocator defined in the cluster manager, to allocate the other ones...
    4343 ***************************************************************************************/
     
    6464/****************************************************************************************
    6565 * This structure defines a KCM-page descriptor.
    66  * A KCM-page can contain several blocks.
    67  * This descriptor is stored in the first block slot.
     66 * A KCM-page can contain up to (CONFIG_PPM_PAGE_SIZE / CONFIG_CACHE_LINE_SIZE) blocks.
     67 * This kcm page descriptor is stored in the first slot of the page.
    6868 ***************************************************************************************/
    6969
    7070typedef struct kcm_page_s
    7171{
    72         BITMAP          ( bitmap , 16 );       /*! at most 16 blocks in a page (1 if free) */
    73         uint8_t       * blk_tbl;               /*! pointer on first free block in page     */
     72        BITMAP          ( bitmap , CONFIG_KCM_BLOCKS_MAX );
     73        uint8_t       * base;                  /*! pointer on first block in page          */
    7474        kcm_t         * kcm;                   /*! owner KCM allocator                     */
    75         uint8_t         unused;                /*! first free block index in page          */
     75        list_entry_t    list;                  /*! [active / busy / free] list member      */
     76        page_t        * page;                  /*! pointer on the physical page descriptor */
    7677        uint8_t         refcount;              /*! number of allocated blocks              */
    7778        uint8_t         busy;                  /*! page busy if non zero                   */
    7879        uint8_t         active;                /*! page active if non zero                 */
    79         list_entry_t    list;                  /*! [active / busy / free] list member      */
    80         page_t        * page;                  /*! pointer on the physical page descriptor */
     80        uint8_t         unused;                /*!                                         */
    8181}
    8282kcm_page_t;
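The two comments above state that the kcm_page_t descriptor occupies the first slot of the physical page, with base pointing to the first allocatable block. The fragment below is a minimal stand-alone model of that layout and of the address arithmetic used by kcm_get_block() and kcm_put_block(); the helper names block_of() and index_of() and the constant values are illustrative only, and it assumes (as the kernel code does) that masking a block address with the page mask yields the page base where the descriptor lives.

    /* stand-alone sketch of the KCM page layout (hypothetical helpers, 4 Kbytes page) */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096u
    #define PAGE_MASK  (~(uintptr_t)(PAGE_SIZE - 1))      /* clears the in-page offset */

    /* address of block <index>, mirroring kcm_get_block() : base = descriptor + block_size */
    static void * block_of( uint8_t * page_base , uint32_t block_size , uint32_t index )
    {
        uint8_t * base = page_base + block_size;
        return base + index * block_size;
    }

    /* recover the block index from a block pointer, mirroring kcm_put_block() */
    static uint32_t index_of( void * ptr , uint32_t block_size )
    {
        uint8_t * page_base = (uint8_t *)((uintptr_t)ptr & PAGE_MASK);
        uint8_t * base      = page_base + block_size;
        return (uint32_t)(((uint8_t *)ptr - base) / block_size);
    }

    int main( void )
    {
        static uint8_t page[PAGE_SIZE] __attribute__((aligned(PAGE_SIZE)));
        uint32_t       block_size = 256;                  /* example block size */
        void         * blk = block_of( page , block_size , 3 );
        printf("recovered index = %u\n", index_of( blk , block_size ));   /* prints 3 */
        return 0;
    }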
     
    8888 * @ type     : KCM allocator type.
    8989 ***************************************************************************************/
    90 error_t kcm_init( kcm_t       * kcm,
    91                           uint32_t      type );
     90void kcm_init( kcm_t    * kcm,
     91                   uint32_t   type );
    9292
    9393/****************************************************************************************
     
    9696 * @ kcm      : pointer on KCM manager to destroy.
    9797 ***************************************************************************************/
    98 void kcm_destroy( kcm_t       * kcm );
     98void kcm_destroy( kcm_t  * kcm );
    9999
    100100/****************************************************************************************
     
    102102 * The object size must be smaller than one page size.
    103103 ****************************************************************************************
    104  * @ kcm     :  pointer on the selected KCM allocator
     104 * @ kcm      :  pointer on the selected KCM allocator
     105 * @ return pointer on allocated block if success / return NULL if failure
    105106 ***************************************************************************************/
    106107void * kcm_alloc( kcm_t * kcm );
  • trunk/kernel/mm/khm.c

    r1 r7  
    4040{
    4141    // check config parameters
    42     if( (CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) >= 32 )
    43     {
    44         printk("\n[PANIC] in %s : CONFIG_PPM_HEAP_ORDER too large\n", __FUNCTION__ );
    45         hal_core_sleep();
    46     }
     42    assert( ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) , __FUNCTION__ ,
     43             "CONFIG_PPM_HEAP_ORDER too large" );
    4744
    4845    // initialize lock
     
    6562        khm->size    = heap_size;
    6663        khm->next    = (intptr_t)heap_base;
     64
     65    kinit_dmsg("\n[INFO] %s done in cluster %x at cycle %d\n",
     66               __FUNCTION__ , local_cxy , hal_time_stamp() );
    6767}
    6868
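The assert introduced above replaces the old printk/hal_core_sleep() check: the kernel heap covers (1 << CONFIG_PPM_HEAP_ORDER) pages of (1 << CONFIG_PPM_PAGE_SHIFT) bytes, so the two exponents must sum to less than 32 for the heap size to stay representable on 32 bits. Below is a quick stand-alone check of that arithmetic, with illustrative values that are not taken from the actual kernel configuration.

    /* stand-alone check of the khm_init() size constraint (example config values) */
    #include <assert.h>
    #include <stdint.h>

    #define CONFIG_PPM_PAGE_SHIFT  12      /* 4 Kbytes pages (illustrative)     */
    #define CONFIG_PPM_HEAP_ORDER  18      /* heap of 2^18 pages (illustrative) */

    int main( void )
    {
        /* same condition as the kernel assert */
        assert( (CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 );

        /* resulting heap size : 2^(12+18) bytes = 1 Gbyte, addressable on 32 bits */
        uint64_t heap_size = (uint64_t)1 << (CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER);
        return ( heap_size == (1ULL << 30) ) ? 0 : 1;
    }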
  • trunk/kernel/mm/kmem.c

    r1 r7  
    3737#include <thread.h>
    3838#include <process.h>
    39 #include <device.h>
     39#include <chdev.h>
    4040#include <mapper.h>
    4141#include <vfs.h>
     
    4949#include <kmem.h>
    5050
    51 /////////////////////////////////////////////////////////////////////////////////////////////
    52 // This global array is indexed by the Kernel Memory Object Type (defined in kmem.h)
    53 // It contains the size of fixed size objects type dynamically allocated by the KCMs.
    54 // This array should be consistent with the enum defined kmem.h.
    55 /////////////////////////////////////////////////////////////////////////////////////////////
    56 
    57 uint32_t  kmem_size_tbl[KMEM_TYPES_NR] =
    58 {
    59     0,                        // 0  KMEM_PAGE is not a fixed size object
    60     0,                        // 1  KMEM_GENERIC   
    61     sizeof( kcm_t ),          // 2  KMEM_KCM
    62     sizeof( vseg_t ),         // 3  KMEM_VSEG
    63     sizeof( device_t ),       // 4  KMEM_DEVICE
    64     sizeof( mapper_t ),       // 5  KMEM_MAPPER
    65     sizeof( process_t ),      // 6  KMEM_PROCESS
    66     0,                        // 7
    67     0,                        // 8 
    68     0,                        // 9 
    69 
    70     sizeof( fatfs_inode_t ),  // 10 KMEM_FATFS_INODE
    71     sizeof( fatfs_ctx_t ),    // 11 KMEM_FATFS_CTX
    72     sizeof( ramfs_inode_t ),  // 12 KMEM_RAMFS_INODE
    73     sizeof( ramfs_ctx_t ),    // 13 KMEM_RAMFS_CTX
    74     sizeof( vfs_ctx_t ),      // 14 KMEM_VFS_CTX
    75     sizeof( vfs_inode_t ),    // 15 KMEM_VFS_INODE
    76     sizeof( vfs_dentry_t ),   // 16 KMEM_VFS_DENTRY
    77     sizeof( vfs_file_t ),     // 17 KMEM_VFS_FILE
    78     sizeof( remote_sem_t ),   // 18 KMEM_SEM
    79     0,                        // 19
    80 
    81     64,                       // 20 KMEM_64_BYTES
    82     128,                      // 21 KMEM_128_BYTES
    83     256,                      // 22 KMEM_256_BYTES
    84     512,                      // 23 KMEM_512_BYTES
    85     1024,                     // 24 KMEM_1024_BYTES
    86     2048,                     // 25 KMEM_2048_BYTES
    87 };
     51///////////////////////////
     52void kmem_print_kcm_table()
     53{
     54    uint32_t    index;
     55    kcm_t     * kcm;
     56    cluster_t * cluster = LOCAL_CLUSTER;
     57
     58    printk("\n    *** KCM Pointers Table ***\n");
     59
     60    for( index = 0 ; index < KMEM_TYPES_NR ; index++ )
     61    {
     62        kcm = cluster->kcm_tbl[index];
     63        if( kcm != NULL )
     64        {
     65            if( index == kcm->type )
     66            {
     67                printk("     - KCM[%s] (at address %x) is OK\n",
     68                       kmem_type_str( index ) , (intptr_t)kcm );
     69            }
     70            else
     71            {
     72                printk("     - KCM[%s] (at address %x) is KO : has type %s\n",
     73                       kmem_type_str( index ) , (intptr_t)kcm , kmem_type_str( kcm->type ) );
     74            }           
     75        }
     76    }
     77}  // end kmem_print_kcm_table()
     78
     79/////////////////////////////////////////
     80uint32_t  kmem_type_size( uint32_t type )
     81{
     82    if     ( type == KMEM_PAGE )          return CONFIG_PPM_PAGE_SIZE;
     83    else if( type == KMEM_GENERIC )       return 0;
     84    else if( type == KMEM_KCM )           return sizeof( kcm_t );
     85    else if( type == KMEM_VSEG )          return sizeof( vseg_t );
     86    else if( type == KMEM_DEVICE )        return sizeof( chdev_t );
     87    else if( type == KMEM_MAPPER )        return sizeof( mapper_t );
     88    else if( type == KMEM_PROCESS )       return sizeof( process_t );
     89    else if( type == KMEM_CPU_CTX )       return sizeof( hal_cpu_context_t );
     90    else if( type == KMEM_FPU_CTX )       return sizeof( hal_fpu_context_t );
     91
     92    else if( type == KMEM_FATFS_INODE )   return sizeof( fatfs_inode_t );
     93    else if( type == KMEM_FATFS_CTX )     return sizeof( fatfs_ctx_t );
     94    else if( type == KMEM_RAMFS_INODE )   return sizeof( ramfs_inode_t );
     95    else if( type == KMEM_RAMFS_CTX )     return sizeof( ramfs_ctx_t );
     96    else if( type == KMEM_VFS_CTX )       return sizeof( vfs_ctx_t );
     97    else if( type == KMEM_VFS_INODE )     return sizeof( vfs_inode_t );
     98    else if( type == KMEM_VFS_DENTRY )    return sizeof( vfs_dentry_t );
     99    else if( type == KMEM_VFS_FILE )      return sizeof( vfs_file_t );
     100    else if( type == KMEM_SEM )           return sizeof( remote_sem_t );
     101    else                                  return 0;
     102}
     103
     104/////////////////////////////////////
     105char * kmem_type_str( uint32_t type )
     106{
     107    if     ( type == KMEM_PAGE )          return "KMEM_PAGE";
     108    else if( type == KMEM_GENERIC )       return "KMEM_GENERIC";
     109    else if( type == KMEM_KCM )           return "KMEM_KCM";
     110    else if( type == KMEM_VSEG )          return "KMEM_VSEG";
     111    else if( type == KMEM_DEVICE )        return "KMEM_DEVICE";
     112    else if( type == KMEM_MAPPER )        return "KMEM_MAPPER";
     113    else if( type == KMEM_PROCESS )       return "KMEM_PROCESS";
     114    else if( type == KMEM_CPU_CTX )       return "KMEM_CPU_CTX";
     115    else if( type == KMEM_FPU_CTX )       return "KMEM_FPU_CTX";
     116
     117    else if( type == KMEM_FATFS_INODE )   return "KMEM_FATFS_INODE";
     118    else if( type == KMEM_FATFS_CTX )     return "KMEM_FATFS_CTX";
     119    else if( type == KMEM_RAMFS_INODE )   return "KMEM_RAMFS_INODE";
     120    else if( type == KMEM_RAMFS_CTX )     return "KMEM_RAMFS_CTX";
     121    else if( type == KMEM_VFS_CTX )       return "KMEM_VFS_CTX";
     122    else if( type == KMEM_VFS_INODE )     return "KMEM_VFS_INODE";
     123    else if( type == KMEM_VFS_DENTRY )    return "KMEM_VFS_DENTRY";
     124    else if( type == KMEM_VFS_FILE )      return "KMEM_VFS_FILE";
     125    else if( type == KMEM_SEM )           return "KMEM_SEM";
     126    else                                  return "undefined";
     127}
    88128
    89129/////////////////////////////////////////////////////////////////////////////////////////////
     
    91131// It uses the KCM allocator embedded in cluster manager, initialized by cluster_init().
    92132/////////////////////////////////////////////////////////////////////////////////////////////
    93 static error_t kmem_get_kcm( uint32_t type )
     133static error_t kmem_create_kcm( uint32_t type )
    94134{
    95135        kcm_t    * kcm;
    96         error_t    error;
    97 
    98     // check kmem object type
    99     if( (type < 2) || (type >= KMEM_TYPES_NR) )
    100     {
    101         printk("\n[PANIC] in %s illegal request type\n", __FUNCTION__ );
    102         hal_core_sleep();
    103     }
     136
     137    assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );
     138
     139        kmem_dmsg("\n[INFO] %s : enters / KCM type %s missing in cluster %x\n",
     140                      __FUNCTION__ , kmem_type_str( type ) , local_cxy );
    104141
    105142    cluster_t * cluster = LOCAL_CLUSTER;
    106143
    107144    // allocates memory for the requested KCM allocator
     145    // from the KCM allocator embedded in cluster descriptor
    108146        kcm = kcm_alloc( &cluster->kcm );
     147
    109148        if( kcm == NULL )
    110149    {
     
    115154
    116155    // initializes the new KCM allocator
    117         error = kcm_init( kcm , type );
    118 
    119         if( error )
    120         {
    121                 printk("\n[ERROR] in %s : failed to init KCM type %d\n",
    122                __FUNCTION__ , type , local_cxy );
    123                 return error;
    124         }
    125 
     156        kcm_init( kcm , type );
     157
      158    // register it in the KCM pointers table
    126159        cluster->kcm_tbl[type] = kcm;
     160
    127161        hal_wbflush();
    128162
     163        kmem_dmsg("\n[INFO] %s : exit / KCM type %s created in cluster %x\n",
     164                      __FUNCTION__ , kmem_type_str( type ) , local_cxy );
     165
    129166        return 0;
    130 }
     167
     168}  // end kmem_create_kcm()
     169
    131170 
    132171
     
    137176
    138177        uint32_t    type;
    139         uint32_t    size;    // ln( pages ) if PPM / bytes if KHM or BKM / unused if KCM
    140178        uint32_t    flags;
    141         void      * ptr;     // local pointer on allocated memory buffer
     179        uint32_t    size;    // ln( pages ) if PPM / bytes if KHM / unused if KCM
     180        void      * ptr;     // memory buffer if KHM or KCM / page descriptor if PPM
     181
    142182
    143183        type  = req->type;
     
    145185        flags = req->flags;
    146186 
    147         kmem_dmsg("\n[INFO] %s in cluster %x : type %d, size %d, flags %x at cycle %d \n",
    148                       __FUNCTION__, local_cxy , type, size, flags , hal_time_stamp() );
    149 
    150         if( type >= KMEM_TYPES_NR )
    151     {
    152         printk("\n[PANIC] in %s illegal request type\n", __FUNCTION__ );
    153         hal_core_sleep();
    154     }
     187        assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );
    155188 
     189        kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s / size %d\n",
     190                      __FUNCTION__ , local_cxy , kmem_type_str( type ) , size );
     191
    156192    // analyse request type
    157193        if( type ==  KMEM_PAGE )                       // PPM allocator
    158194    {       
    159195        // allocate the number of requested pages
    160                 ptr = (void*)ppm_alloc_pages( size );
     196                ptr = (void *)ppm_alloc_pages( size );
    161197
    162198        // reset page if required
    163                 if( flags & AF_ZERO ) page_zero( (page_t*)ptr );
     199                if( flags & AF_ZERO ) page_zero( (page_t *)ptr );
     200   
     201        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / page = %x / base = %x\n",
     202                  __FUNCTION__, local_cxy , kmem_type_str( type ) ,
     203                  (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
    164204        }
    165205    else if( type == KMEM_GENERIC )                // KHM allocator
     
    170210        // reset memory if requested
    171211                if( flags & AF_ZERO ) memset( ptr , 0 , size );
     212
     213        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
     214                  __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
    172215        }
    173216    else                                           // KCM allocator
    174217    {
    175         uint32_t error = 0;
    176 
    177218        // initialize the KCM allocator if not already done
    178219            if( cluster->kcm_tbl[type] == NULL )
    179220            {
    180221            spinlock_lock( &cluster->kcm_lock );
    181                         error = kmem_get_kcm( type );
     222                        error_t error = kmem_create_kcm( type );
    182223            spinlock_unlock( &cluster->kcm_lock );
     224            if ( error ) return NULL;
    183225            }
    184226
    185         // allocate memory from KCM if success
    186         if( error ) ptr = NULL;
    187         else        ptr = kcm_alloc( cluster->kcm_tbl[type] );
     227        // allocate memory from KCM
     228        ptr = kcm_alloc( cluster->kcm_tbl[type] );
     229
     230        // reset memory if requested
     231                if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );
     232
     233        kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x\n",
     234                  __FUNCTION__, local_cxy , kmem_type_str( type ) , (intptr_t)ptr );
    188235        }
    189236
    190237    if( ptr == NULL )
    191238    {
    192             printk("\n[ERROR] in %s : failed for type %d, size %d, flags %x in cluster %x\n",
    193                __FUNCTION__ , type , size , flags , local_cxy );
     239            printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n",
     240               __FUNCTION__ , type , size , local_cxy );
    194241 
    195242            return NULL;
    196243    }
    197244
    198         kmem_dmsg("\n[INFO] %s got ptr = %x in cluster %x at cycle %d\n",
    199                   __FUNCTION__, (intptr_t)ptr , local_cxy , hal_time_stamp() );
    200            
    201245        return ptr;
    202246
  • trunk/kernel/mm/kmem.h

    r1 r7  
    3131
    3232/*************************************************************************************
    33  * This enum defines the Kernel Memory Types for dynamically allocated objects.
      33 * This enum defines the Kernel Memory Types for dynamically allocated objects.
    3434 ************************************************************************************/
    3535
     
    4343  KMEM_MAPPER           = 5,   /*! mapper_t                                         */
    4444  KMEM_PROCESS          = 6,   /*! process_t                                        */
    45   KMEM_TBD_7            = 7,
    46   KMEM_TBD_8            = 8,
     45  KMEM_CPU_CTX          = 7,   /*! hal_cpu_context_t                                */
     46  KMEM_FPU_CTX          = 8,   /*! hal_fpu_context_t                                */
    4747  KMEM_TBD_9            = 9,
    4848
     
    5858  KMEM_TBD_19           = 19,
    5959
    60   KMEM_64_BYTES         = 20,  /*! fixed size buffer                                */
    61   KMEM_128_BYTES        = 21,  /*! fixed size buffer                                */
    62   KMEM_256_BYTES        = 22,  /*! fixed size buffer                                */
    63   KMEM_512_BYTES        = 23,  /*! fixed size buffer                                */
    64   KMEM_1024_BYTES       = 24,  /*! fixed size buffer                                */
    65   KMEM_2048_BYTES       = 25,  /*! fixed size buffer                                */
    66 
    67   KMEM_TYPES_NR         = 26,
     60  KMEM_TYPES_NR         = 20,
    6861};
    6962
     
    9386 * This generic function allocates physical memory in the local cluster
    9487 * as specified by the request descriptor.
    95  * It uses three specialised physical memory allocators:
     88 * It uses three specialised physical memory allocators, depending on request type:
    9689 * - PPM (Physical Pages Manager) allocates N contiguous physical pages,
    9790 *       N must be a power of 2.
     
    10194 *       handling a dedicated cache for each object type.
    10295 *************************************************************************************
    103  * @ req : local pointer to allocation request
    104  * @ return a local pointer to allocated buffer / NULL otherwise
     96 * @ req   : local pointer to allocation request.
     97 * @ return a local pointer on page descriptor if PPM (i.e. type KMEM_PAGE).
     98 *   return a local pointer to allocated buffer if KCM or KHM.
     99 *   return NULL if no physical memory available.
    105100 ************************************************************************************/
    106101void * kmem_alloc( kmem_req_t * req );
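The updated comment above distinguishes the three return conventions (page descriptor for KMEM_PAGE, buffer pointer for KCM or KHM types, NULL on failure). The sketch below shows a typical call for a fixed-size KCM object; it only uses the request fields and flags visible in this changeset (type, size, flags, AF_KERNEL, AF_ZERO) and is an illustration, not code from the kernel tree.

    /* illustrative use of kmem_alloc() for a fixed-size object (not kernel code) */
    static vseg_t * vseg_alloc_sketch( void )
    {
        kmem_req_t  req;

        req.type  = KMEM_VSEG;              /* fixed-size type served by a KCM   */
        req.size  = 0;                      /* unused for KCM types              */
        req.flags = AF_KERNEL | AF_ZERO;    /* kernel allocation, zeroed buffer  */

        return (vseg_t *)kmem_alloc( &req );   /* NULL if no memory in cluster   */
    }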
     
    114109void  kmem_free ( kmem_req_t * req );
    115110
     111/*************************************************************************************
     112 * This function returns a printable string for a kmem object type.
     113 *************************************************************************************
     114 * @ type   : kmem object type.
     115 ************************************************************************************/
     116char * kmem_type_str( uint32_t type );
     117
     118/*************************************************************************************
     119 * This function returns the size (bytes) for a kmem object type.
     120 *************************************************************************************
     121 * @ type   : kmem object type.
     122 ************************************************************************************/
     123uint32_t kmem_type_size( uint32_t type );
     124
     125/*************************************************************************************
      126 * This function displays the content of the KCM pointers table.
     127 ************************************************************************************/
     128void kmem_print_kcm_table();
     129
    116130
    117131#endif  /* _KMEM_H_ */
  • trunk/kernel/mm/page.c

    r1 r7  
    265265
    266266        size = (1 << page->order) * CONFIG_PPM_PAGE_SIZE;
    267         base = ppm_page2base(page);
     267        base = ppm_page2base( page );
     268
     269// kmem_print_kcm_table();
     270
     271// printk("\n@@@ in page_zero : size = %x / base = %x / kcm_tbl = %x\n",
     272//        size , (uint32_t)base , (uint32_t)(&LOCAL_CLUSTER->kcm_tbl[0] ) );
    268273
    269274        memset( base , 0 , size );
     275
     276// kmem_print_kcm_table();
     277
    270278}
    271279
     
    285293
    286294
    287 /*
    288 /////////////////////////////////////////
    289 static void page_to_free( page_t * page )
    290 {
    291         assert((page->state == PGINVALID) ||
    292                (page->state == PGVALID)   ||
    293                (page->state == PGINIT));
    294 
    295         if(page_refcount_get(page) != 0)
    296         {
    297                 printk(ERROR, "ERROR: %s: cpu %d, pid %d, tid %x, page %x, state %x, count %d [%d]\n",
    298                        __FUNCTION__,
    299                        cpu_get_id(),
    300                        current_task->pid,
    301                        current_thread,
    302                        page,
    303                        page->state,
    304                        page_refcount_get(page),
    305                        cpu_time_stamp());
    306         }
    307         page->state = PGFREE;
    308 }
    309 
    310 ////////////////////////////////////////////
    311 static void page_to_invalid( page_t * page )
    312 {
    313         assert((page->state == PGFREE)  ||
    314                (page->state == PGVALID) ||
    315                (page->state == PGLOCKEDIO));
    316  
    317         if(page->state == PGLOCKEDIO)
    318         {
    319                 page->state = PGINVALID;
    320                 page_unlock(page);
    321                 return;
    322         }
    323  
    324         page->state = PGINVALID;
    325 }
    326 
    327 //////////////////////////////////////////
    328 static void page_to_valid( page_t * page )
    329 {
    330         assert((page->state == PGINVALID) ||
    331                (page->state == PGLOCKED)  ||
    332                (page->state == PGLOCKEDIO));
    333  
    334         if(page->state == PGINVALID)
    335         {
    336                 page->state = PGVALID;
    337                 return;
    338         }
    339 
    340         page->state = PGVALID;
    341         page_unlock(page);
    342 }
    343 
    344 //////////////////////////////////////////////
    345 static void page_to_locked_io( page_t * page )
    346 {
    347         assert((page->state == PGINVALID) ||
    348                (page->state == PGVALID));
    349 
    350         page_lock(page);
    351         page->state = PGLOCKEDIO;
    352 }
    353 
    354 ///////////////////////////////////////////
    355 static void page_to_locked( page_t * page )
    356 {
    357         assert(page->state == PGVALID);
    358         page_lock(page);
    359         page->state = PGLOCKED;
    360 }
    361 
    362 /////////////////////////////////////////
    363 void page_state_set( page_t       * page,
    364                      page_state_t   state )
    365 {
    366         switch( state )
    367         {
    368         case PGFREE:
    369                 page_to_free(page);
    370                 return;
    371 
    372         case PGINVALID:
    373                 page_to_invalid(page);
    374                 return;
    375 
    376         case PGVALID:
    377                 page_to_valid(page);
    378                 return;
    379 
    380         case PGLOCKEDIO:
    381                 page_to_locked_io(page);
    382                 return;
    383 
    384         case PGLOCKED:
    385                 page_to_locked(page);
    386                 return;
    387 
    388         case PGRESERVED:
    389                 refcount_set(&page->count,1);
    390 
    391         case PGINIT:
    392                 page->state = new_state;
    393                 return;
    394 
    395         default:
    396                 printk(ERROR, "ERROR: %s: Unknown Asked State %d\n", new_state);
    397                 return;
    398         }
    399 }
    400 */
    401 
    402 
     295
  • trunk/kernel/mm/ppm.c

    r1 r7  
    9191static void ppm_free_pages_nolock( page_t * page )
    9292{
     93        page_t   * buddy;            // searched buddy page descriptor
     94        uint32_t   buddy_index;      // buddy page index
     95        page_t   * current;          // current (merged) page descriptor
     96        uint32_t   current_index;    // current (merged) page index
      97        uint32_t   current_order;    // current (merged) page order
     98
    9399    ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
    94100    page_t   * pages_tbl   = ppm->pages_tbl;
    95     uint32_t   page_order  = page->order;
    96     uint32_t   page_index  = (uint32_t)(page - ppm->pages_tbl);
    97 
    98         page_t   * block;
    99         uint32_t   block_index;
    100         uint32_t   current_order;
    101 
    102     // update page descriptor flag
     101
     102    // update released page descriptor flags
    103103        page_set_flag( page , PG_FREE );
    104104
    105     // update free_lists
    106         for( current_order = page_order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order++ )
     105    // search the buddy page descriptor
     106    // - merge with current page descriptor if found
     107    // - exit to release the current page descriptor if not found
     108    current       = page ,
     109    current_index = (uint32_t)(page - ppm->pages_tbl);
     110        for( current_order = page->order ;
     111         current_order < CONFIG_PPM_MAX_ORDER ;
     112         current_order++ )
    107113    {
    108                 block_index = page_index ^ (1 << current_order);
    109                 block = pages_tbl + block_index;
    110    
    111                 if( page_is_flag( block , PG_FREE ) || (block->order != current_order) ) break;
    112 
    113                 list_unlink( &block->list );
     114                buddy_index = current_index ^ (1 << current_order);
     115                buddy       = pages_tbl + buddy_index;
     116   
     117                if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;
     118
     119        // remove buddy from free list
     120                list_unlink( &buddy->list );
    114121                ppm->free_pages_nr[current_order] --;
    115122        ppm->total_free_pages -= (1 << current_order);
    116123   
    117                 block->order = 0;
    118                 page_index &= block_index;
    119         }
    120  
    121         block        = pages_tbl + page_index;
    122         block->order = current_order;
    123 
    124         list_add_first( &ppm->free_pages_root[current_order] , &block->list );
     124        // merge buddy with current
     125                buddy->order = 0;
     126                current_index &= buddy_index;
     127        }
     128 
     129    // update merged page descriptor order
     130        current        = pages_tbl + current_index;
     131        current->order = current_order;
     132
     133    // insert current in free list
     134        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
    125135        ppm->free_pages_nr[current_order] ++;
    126136    ppm->total_free_pages += (1 << current_order);
     
    128138}  // end ppm_free_pages_nolock()
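The rewritten loop above locates the buddy of the block being released by XOR-ing the page index with (1 << order), and keeps merging while the buddy is free and has the same order; the merged block starts at the bitwise AND of the two indexes. A small stand-alone example of this index arithmetic (not kernel code):

    /* worked example of the buddy index arithmetic used in ppm_free_pages_nolock() */
    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t index = 12;                     /* releasing the block of pages 12..15    */
        uint32_t order = 2;                      /* block size = 2^2 = 4 pages             */

        uint32_t buddy  = index ^ (1u << order); /* 12 ^ 4 = 8  : buddy is pages 8..11     */
        uint32_t merged = index & buddy;         /* 12 & 8 = 8  : merged block starts at 8 */

        printf("buddy = %u / merged block = pages %u..%u (order %u)\n",
               buddy , merged , merged + (1u << (order + 1)) - 1 , order + 1 );
        return 0;
    }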
    129139
    130 /////////////////////////////////
     140//////////////////////////////
    131141void ppm_init( ppm_t    * ppm,
    132142               uint32_t   pages_nr,        // total pages number
     
    148158                ppm->free_pages_nr[i] = 0;
    149159        }
     160
     161#if( CONFIG_PPM_DEBUG )
     162ppm_print( ppm , "after reset" );
     163#endif
    150164 
    151165    // initialize dirty_list as empty
     
    166180        // set pages numbers
    167181        ppm->pages_nr      = pages_nr;
    168     ppm->pages_offset  = pages_offset;
     182    ppm->pages_offset  = reserved_pages;
    169183
    170184    // initialises all page descriptors in pages_tbl[]
    171     // TODO Peut-on accélérer ces monstrueuses boucle ? [AG]
    172185        for( i = 0 ; i < pages_nr ; i++ )
    173186    {
    174187        page_init( &ppm->pages_tbl[i] );
     188
     189        // TODO optimisation : make only a partial init [AG]
     190        // complete the initialisation when page is allocated [AG]
     191        // ppm->pages_tbl[i].flags = 0;
    175192    }
    176193
    177     // set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
     194    // - set PG_RESERVED flag for reserved pages (kernel code & pages_tbl[])
     195    // - release all other pages to populate the free lists
    178196        for( i = 0 ; i < reserved_pages ; i++)
    179197    {
    180198        page_set_flag( &ppm->pages_tbl[i] , PG_RESERVED );
    181199    }
    182 
    183     // initialise the free lists by releasing all non reserved pages
    184         for( i = 0 ; i < pages_nr ; i++ )
    185         {
    186                 page_t * page = &ppm->pages_tbl[i];
    187                 if( page_is_flag( page , PG_RESERVED) ) ppm_free_pages_nolock( page );
    188         }
     200        for( i = reserved_pages ; i < pages_nr ; i++ )
     201        {
     202            ppm_free_pages_nolock( &ppm->pages_tbl[i] );
     203
     204        // TODO optimisation : decompose this enormous set of small pages
      205        // to a set of big pages with various order values
     206        }
     207
     208    // check consistency
     209    ppm_assert_order( ppm );
     210
     211    kinit_dmsg("\n[INFO] %s : done in cluster %x at cycle %d\n",
     212               __FUNCTION__ , local_cxy , hal_time_stamp() );
     213
     214#if( CONFIG_PPM_DEBUG )
     215ppm_print( ppm , "after init" );
     216#endif
     217   
    189218} // end ppm_init()
    190219
     
    198227    ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    199228
    200         if( ppm->signature != PPM_SIGNATURE )
    201     {
    202         printk("\n[PANIC] in %s : PPM non initialised in cluster %x\n",
    203                __FUNCTION__ , local_cxy );
    204         hal_core_sleep();
    205     }
    206 
    207         if( order >= CONFIG_PPM_MAX_ORDER )
    208     {
    209         printk("\n[PANIC] in %s : illegal order argument in cluster %x\n",
    210                __FUNCTION__ , local_cxy );
    211         hal_core_sleep();
    212     }
     229        assert( (ppm->signature == PPM_SIGNATURE) , __FUNCTION__ , "PPM non initialised" );
     230
     231        assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" );
    213232
    214233        page_t * block = NULL;
     234
     235    ppm_dmsg("\n[INFO] %s : enters / order = %d\n",
     236             __FUNCTION__ , order );
     237
     238#if( CONFIG_PPM_DEBUG )
     239ppm_print( ppm , "before allocation" );
     240#endif
    215241
    216242    // take lock protecting free lists
    217243        spinlock_lock( &ppm->free_lock );
    218244
    219     // find a block larger or equal to requested size
     245    // find a free block equal or larger to requested size
    220246        for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
    221247        {
     
    264290        spinlock_unlock( &ppm->free_lock );
    265291 
     292    ppm_dmsg("\n[INFO] %s : base = %x / order = %d\n",
     293             __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
     294
     295#if CONFIG_PPM_DEBUG
     296ppm_print( ppm , "after allocation" );
     297#endif
     298
    266299        return block;
    267300}  // end pmm_alloc-pages()
     
    282315}
    283316
    284 /////////////////////////////
    285 void ppm_print( ppm_t * ppm )
     317////////////////////////////
     318void ppm_print( ppm_t * ppm,
     319                char  * string )
    286320{
    287321        uint32_t       order;
     
    292326        spinlock_lock( &ppm->free_lock );
    293327
    294         printk("***  PPM state in cluster %x : pages = %d / offset = %d / free = %d ***\n",
    295                local_cxy , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages );
     328        printk("\n***  PPM state in cluster %x %s : pages = %d / offset = %d / free = %d ***\n",
     329               local_cxy , string , ppm->pages_nr , ppm->pages_offset , ppm->total_free_pages );
    296330       
    297331        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    298332        {
    299                 printk("- order = %d / free_pages = %d  [\n",
     333                printk("- order = %d / free_pages = %d  [",
    300334               order , ppm->free_pages_nr[order] );
    301335               
     
    311345    // release lock protecting free lists
    312346        spinlock_unlock( &ppm->free_lock );
    313 }
    314 
    315 ////////////////////////////////////////
    316 void ppm_assert_order(struct ppm_s *ppm)
     347
     348}  // end ppm_print()
     349 
      350///////////////////////////////////
     351void ppm_assert_order( ppm_t * ppm )
    317352{
    318353        uint32_t       order;
     
    328363                        page = LIST_ELEMENT( iter , page_t , list );
    329364
    330                         if( page->order != order )
    331             {
    332                     printk("%s detected inconsistency at order %d, page %d\n",
    333                            __FUNCTION__, order , page - ppm->pages_tbl );
    334             }
     365                        assert( (page->order == order) , __FUNCTION__ , "PPM inconsistency" );
    335366                }
    336367        }
    337         return;
    338 }
    339 
     368
     369}  // end ppm_assert_order()
     370
  • trunk/kernel/mm/ppm.h

    r1 r7  
    3535
    3636/*****************************************************************************************
    37  * This structure defines the list of blocks of a given size for the "buddy"
    38  * allocation algorithm implemented by the ppm_t manager.
    39  ****************************************************************************************/
    40 typedef struct buddy_list_s
    41 {
    42         list_entry_t  root;       // root of the list
    43         uint32_t      pages_nr;   // number of blocks
    44 }
    45 buddy_list_t;
    46 
    47 /*****************************************************************************************
    4837 * This structure defines the Physical Memory Manager in a cluster.
    4938 * In all clusters, the physical memory bank starts at address 0.
     
    5140 * The physical page descriptors array is implemented just after this offset zone.
    5241 * The main service provided by the PMM is the dynamic allocation of physical pages.
    53  * This low-level allocator implements the buddy algorithm.
     42 * This low-level allocator implements the buddy algorithm: an allocated block is
      43 * an integer number n of 4 Kbytes pages, where n is a power of 2 (log2(n) is the order).
    5444 ****************************************************************************************/
    5545typedef struct ppm_s
     
    7161/*****************************************************************************************
    7262 * This function initializes a PPM (Physical Pages Manager) in a cluster.
    73  * The physical memory base address in all cluster is zero.
     63 * The physical memory base address in all clusters is zero.
    7464 * The physical memory size is NOT constrained to be smaller than 4 Gbytes.
     65 *****************************************************************************************
    7566 * @ ppm          : pointer on physical pages manager.
    7667 * @ pages_nr     : total physical memory size (number of 4 Kbytes pages).
     
    8677 * In normal use, you don't need to call it directly, as the recommended way to get
    8778 * physical pages is to call the generic allocator defined in kmem.h.
     79 *****************************************************************************************
    8880 * @ order        : ln2( number of 4 Kbytes pages)
    8981 * @ returns a pointer on the page descriptor if success / NULL otherwise
     
    9587 * In normal use, you do not need to call it directly, as the recommended way to free
    9688 * physical pages is to call the generic allocator defined in kmem.h.
     89 *****************************************************************************************
    9790 * @ page         : pointer to the page descriptor to be released
    9891 ****************************************************************************************/
     
    10194/*****************************************************************************************
    10295 * This function checks if a page descriptor is valid.
     96 *****************************************************************************************
    10397 * @ page         : pointer on a page descriptor
    10498 * @ returns true if valid / false otherwise.
     
    108102/*****************************************************************************************
    109103 * Get the page base address from the page descriptor pointer.
     104 *****************************************************************************************
    110105 * @ page         : pointer to page descriptor
    111106 * @ returns page base address
     
    115110/*****************************************************************************************
    116111 * Get the page descriptor pointer from the page base address.
     112 *****************************************************************************************
    117113 * @ vaddr        : page base address
    118114 * @ returns pointer on page descriptor
     
    122118/*****************************************************************************************
    123119 * Get the PPN from the page descriptor pointer.
     120 *****************************************************************************************
    124121 * @ page         : pointer to page descriptor
    125122 * @ returns physical page number
     
    129126/*****************************************************************************************
    130127 * Get the page descriptor pointer from the PPN.
     128 *****************************************************************************************
    131129 * @ ppn          : physical page number
    132130 * @ returns pointer on page descriptor
     
    136134/*****************************************************************************************
    137135 * Get the page base address from the PPN.
     136 *****************************************************************************************
    138137 * @ ppn          : physical page number
    139138 * @ returns page base address
     
    143142/*****************************************************************************************
    144143 * Get the PPN from the page base address.
     144 *****************************************************************************************
    145145 * @ vaddr        : page base address
    146146 * @ returns physical page number
     
    150150/*****************************************************************************************
    151151 * This function prints the PPM allocator status.
     152 *****************************************************************************************
    152153 * @ ppm      : pointer on PPM allocator.
      154 * @ string   : defines the display context.
    153155 ****************************************************************************************/
    154 void ppm_print( ppm_t * ppm );
     156void ppm_print( ppm_t * ppm,
     157                char  * string );
    155158
    156159/*****************************************************************************************
    157160 * This function checks PPM allocator consistency.
     161 *****************************************************************************************
    158162 * @ ppm      : pointer on PPM allocator.
    159163 ****************************************************************************************/