Changeset 683 for trunk/kernel/mm


Timestamp:
Jan 13, 2021, 12:36:17 AM
Author:
alain
Message:

All modifications required to support the <tcp_chat> application
including error recovery in case of packet loss.

Location:
trunk/kernel/mm
Files:
13 edited

  • trunk/kernel/mm/kcm.c

    r672 r683  
    3636#include <kcm.h>
    3737
     38///////////////////////////////////////////////////////////////////////////////////////////
     39//         global variables
     40///////////////////////////////////////////////////////////////////////////////////////////
     41
     42extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
     43
    3844
    3945/////////////////////////////////////////////////////////////////////////////////////
     
    4248
    4349//////////////////////////////////////////////////////////////////////////////////////
    44 // This static function must be called by a local thread.
     50// This static function is called by the kcm_alloc() function.
    4551// It returns a pointer on a block allocated from an active kcm_page.
    4652// It panics if no block is available in the selected page.
     
    5561{
    5662    // initialise variables
    57     uint32_t size   = 1 << kcm->order;
    58     uint32_t max    = kcm->max_blocks;
     63    uint32_t order  = kcm->order;
    5964    uint32_t count  = kcm_page->count;
    6065    uint64_t status = kcm_page->status;
    6166
    62 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" );
     67// check kcm page not full
     68assert( __FUNCTION__, (count < 63) ,
     69"kcm_page should not be full / cxy %x / order %d / count %d", local_cxy, order, count );
    6370
    6471    uint32_t index  = 1;
     
    6774        // allocate first free block in kcm_page, update status,
    6875    // and count , compute index of allocated block in kcm_page
    69     while( index <= max )
     76    while( index <= 63 )
    7077    {
    7178        if( (status & mask) == 0 )   // block found
     
    8188    }
    8289
    83     // change the page list if found block is the last
    84     if( count == max-1 )
     90    // switch page to full if last block
     91    if( (count + 1) == 63 )
    8592    {
    8693                list_unlink( &kcm_page->list);
     
    9299
    93100        // compute return pointer
    94         void * ptr = (void *)((intptr_t)kcm_page + (index * size) );
    95 
    96 #if DEBUG_KCM
    97 thread_t * this  = CURRENT_THREAD;
    98 uint32_t   cycle = (uint32_t)hal_get_cycles();
    99 if( DEBUG_KCM < cycle )
    100 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n",
    101 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle );
    102 #endif
     101        void * ptr = (void *)((intptr_t)kcm_page + (index << order));
    103102
    104103        return ptr;
     
    107106
    108107/////////////////////////////////////////////////////////////////////////////////////
    109 // This private static function must be called by a local thread.
     108// This static function is called by the kcm_free() function.
    110109// It releases a previously allocated block to the relevant kcm_page.
    111110// It panics if the released block is not allocated in this page.
     
    121120{
    122121    // initialise variables
    123     uint32_t max    = kcm->max_blocks;
    124     uint32_t size   = 1 << kcm->order;
     122    uint32_t order  = kcm->order;
    125123    uint32_t count  = kcm_page->count;
    126124    uint64_t status = kcm_page->status;
    127125   
    128         // compute block index from block pointer
    129         uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;
     126        // compute block index from block pointer and kcm_page pointer
     127        uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order;
    130128
    131129    // compute mask in bit vector
     
    136134        printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n",
    137135        __FUNCTION__, local_cxy, block_ptr, kcm, kcm_page );
    138         printk("   status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );
    139136        kcm_remote_display( local_cxy , kcm );
    140137        return;
     
    145142        kcm_page->count  = count - 1;
    146143
    147         // change the page mode if page was full
    148         if( count == max )
     144        // switch page to active if it was full
     145        if( count == 63 )
    149146        {
    150147                list_unlink( &kcm_page->list );
     
    155152        }
    156153
    157 #if DEBUG_KCM
    158 thread_t * this  = CURRENT_THREAD;
    159 uint32_t   cycle = (uint32_t)hal_get_cycles();
    160 if( DEBUG_KCM < cycle )
    161 printk("\n[%s] thread[%x,%x] block %x / page %x / size %d / count %d / cycle %d\n",
    162 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );
    163 #endif
    164 
    165154}  // kcm_put_block()
    166155
    167156/////////////////////////////////////////////////////////////////////////////////////
    168 // This static function must be called by a local thread.
    169 // It returns one non-full kcm_page with the following policy :
     157// This static function  returns one non-full kcm_page with the following policy :
    170158// - if the "active_list" is non empty, it returns the first "active" page,
    171159//   without modifying the KCM state.
     
    188176    else                            // allocate a new page from PPM
    189177        {
    190         // get one 4 Kbytes page from local PPM
    191         page_t * page = ppm_alloc_pages( 0 );
     178        // get KCM order
     179        uint32_t order = kcm->order;
     180
     181        // get one kcm_page from  PPM
     182        page_t * page = ppm_alloc_pages( order + 6 - CONFIG_PPM_PAGE_ORDER );
    192183
    193184            if( page == NULL )
    194185            {
    195                     printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
    196                 __FUNCTION__ , local_cxy );
    197 
     186
     187#if DEBUG_KCM_ERROR
     188printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     189__FUNCTION__ , local_cxy );
     190#endif
    198191                    return NULL;
    199192        }
     
    202195            xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
    203196
    204         // get local pointer on kcm_page
     197        // get local pointer on kcm_page 
    205198            kcm_page = GET_PTR( base_xp );
    206199
     
    225218{
    226219
    227 assert( __FUNCTION__, ((order > 5) && (order < 12)) , "order must be in [6,11]" );
    228 
    229 assert( __FUNCTION__, (CONFIG_PPM_PAGE_SHIFT == 12) , "check status bit_vector width" );
     220// check argument
     221assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER),
     222"order argument %d too large", order );
     223
     224assert( __FUNCTION__, (order >= CONFIG_CACHE_LINE_ORDER),
     225"order argument %d too small", order );
    230226
    231227        // initialize lock
     
    238234        list_root_init( &kcm->active_root );
    239235
    240         // initialize order and max_blocks
    241         kcm->order      = order;
    242     kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1;
     236        // initialize order
     237        kcm->order = order;
    243238 
    244239#if DEBUG_KCM
    245 thread_t * this  = CURRENT_THREAD;
    246 uint32_t   cycle = (uint32_t)hal_get_cycles();
    247 if( DEBUG_KCM < cycle )
    248 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n",
    249 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks );
     240if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) )
     241printk("\n[%s] cxy %x / order %d\n",
     242__FUNCTION__, local_cxy, order );
    250243#endif
    251244
     
    287280void * kcm_alloc( uint32_t order )
    288281{
    289     kcm_t      * kcm_ptr;
     282    kcm_t      * kcm;
    290283        kcm_page_t * kcm_page;
    291         void       * block_ptr;
    292 
    293    // min block size is 64 bytes
    294     if( order < 6 ) order = 6;
    295 
    296 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );
     284        void       * block;
     285
     286// check argument
     287assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER),
     288"order argument %d too large", order );
     289
     290#if DEBUG_KCM
     291uint32_t cycle = (uint32_t)hal_get_cycles();
     292#endif
     293
     294    // smallest block size is a cache line
     295    if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
    297296
    298297    // get local pointer on relevant KCM allocator
    299     kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
     298    kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
    300299
    301300    // build extended pointer on local KCM lock
    302     xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );
     301    xptr_t lock_xp = XPTR( local_cxy , &kcm->lock );
    303302
    304303        // get KCM lock
     
    306305
    307306    // get a non-full kcm_page
    308     kcm_page = kcm_get_page( kcm_ptr );
    309 
    310 #if DEBUG_KCM
    311 thread_t * this  = CURRENT_THREAD;
    312 uint32_t   cycle = (uint32_t)hal_get_cycles();
    313 if( DEBUG_KCM < cycle )
    314 {
    315 printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n",
    316 __FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr,
    317 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
    318 kcm_remote_display( local_cxy , kcm_ptr );
    319 }
    320 #endif
     307    kcm_page = kcm_get_page( kcm );
    321308
    322309    if( kcm_page == NULL )
     
    326313        }
    327314
    328         // get a block from selected active page
    329         block_ptr = kcm_get_block( kcm_ptr , kcm_page );
     315#if DEBUG_KCM
     316if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     317printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     318"    page %x / status [%x,%x] / count %d\n",
     319__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     320kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     321#endif
     322
     323        // allocate a block from selected active page
     324        block = kcm_get_block( kcm , kcm_page );
     325
     326#if DEBUG_KCM
     327if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     328printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     329"    page %x / status [%x,%x] / count %d\n",
     330__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     331kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     332#endif
    330333
    331334        // release lock
    332335        remote_busylock_release( lock_xp );
    333336
    334 #if DEBUG_KCM
    335 if( DEBUG_KCM < cycle )
    336 printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n",
    337 __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr,
    338 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
    339 #endif
    340 
    341         return block_ptr;
     337        return block;
    342338
    343339}  // end kcm_alloc()
    344340
    345 /////////////////////////////////
    346 void kcm_free( void * block_ptr )
    347 {
    348     kcm_t      * kcm_ptr;
     341///////////////////////////////
     342void kcm_free( void    * block,
     343               uint32_t  order )
     344{
     345    kcm_t      * kcm;
    349346        kcm_page_t * kcm_page;
    350347
    351348// check argument
    352 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );
     349assert( __FUNCTION__, (block != NULL),
     350"block pointer cannot be NULL" );
     351
     352#if DEBUG_KCM
     353uint32_t cycle = (uint32_t)hal_get_cycles();
     354#endif
     355
     356    // smallest block size is a cache line
     357    if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
     358
     359    // get local pointer on relevant KCM allocator
     360    kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
    353361
    354362    // get local pointer on KCM page
    355         kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
    356 
    357     // get local pointer on KCM descriptor
    358         kcm_ptr = kcm_page->kcm;
    359 
    360 #if DEBUG_KCM
    361 thread_t * this  = CURRENT_THREAD;
    362 uint32_t   cycle = (uint32_t)hal_get_cycles();
    363 if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
    364 {
    365 printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n",
    366 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr,
    367 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
    368 kcm_remote_display( local_cxy , kcm_ptr );
    369 }
    370 #endif
     363    intptr_t kcm_page_mask = (1 << (order + 6)) - 1;
     364        kcm_page = (kcm_page_t *)((intptr_t)block & ~kcm_page_mask);
    371365
    372366    // build extended pointer on local KCM lock
    373     xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock );
     367    xptr_t lock_xp = XPTR( local_cxy , &kcm->lock );
    374368
    375369        // get lock
    376370        remote_busylock_acquire( lock_xp );
    377371
    378         // release block
    379         kcm_put_block( kcm_ptr , kcm_page , block_ptr );
     372#if DEBUG_KCM
     373if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
      374printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     375"    page %x / status [%x,%x] / count %d\n",
     376__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     377kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     378#endif
     379
     380        // release the block to the relevant page
     381        kcm_put_block( kcm , kcm_page , block );
     382
     383#if DEBUG_KCM
     384if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     385printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     386"    page %x / status [%x,%x] / count %d\n",
     387__FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr,
     388kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count );
     389#endif
    380390
    381391        // release lock
    382392        remote_busylock_release( lock_xp );
    383393
    384 #if DEBUG_KCM
    385 if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
    386 {
    387 printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n",
    388 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr,
    389 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
    390 kcm_remote_display( local_cxy , kcm_ptr );
    391 }
    392 #endif
    393 
    394394}  // end kcm_free()
    395395
     
    400400
    401401/////////////////////////////////////////////////////////////////////////////////////
    402 // This static function can be called by any thread running in any cluster.
     402// This static function is called by the kcm_remote_alloc() function.
     403// It can be called by any thread running in any cluster.
    403404// It returns a local pointer on a block allocated from an active kcm_page.
    404405// It panics if no block is available in the selected kcm_page.
     
    415416{
    416417    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    417     uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
    418418    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    419419    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
    420     uint32_t size   = 1 << order;
    421 
    422 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" );
     420
     421// check kcm_page not full
     422assert( __FUNCTION__, (count < 63) ,
     423"kcm_page should not be full / cxy %x / order %d / count %d", kcm_cxy, order, count );
    423424
    424425    uint32_t index  = 1;
     
    427428        // allocate first free block in kcm_page, update status,
    428429    // and count , compute index of allocated block in kcm_page
    429     while( index <= max )
     430    while( index <= 63 )
    430431    {
    431432        if( (status & mask) == 0 )   // block found
     
    440441    }
    441442
    442         // change the page list if found block is the last
    443         if( count == max-1 )
      443        // switch the page to full if last block
     444        if( (count + 1) == 63 )
    444445        {
    445446                list_remote_unlink( kcm_cxy , &kcm_page->list );
     
    451452
    452453        // compute return pointer
    453         void * ptr = (void *)((intptr_t)kcm_page + (index * size) );
    454 
    455 #if DEBUG_KCM_REMOTE
    456 thread_t * this  = CURRENT_THREAD;
    457 uint32_t   cycle = (uint32_t)hal_get_cycles();
    458 if( DEBUG_KCM_REMOTE < cycle )
    459 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n",
    460 __FUNCTION__, this->process->pid, this->trdid,
    461 ptr, kcm_page, kcm_cxy, size, count + 1 );
    462 #endif
     454        void * ptr = (void *)((intptr_t)kcm_page + (index << order));
    463455
    464456        return ptr;
     
    467459
    468460/////////////////////////////////////////////////////////////////////////////////////
    469 // This private static function can be called by any thread running in any cluster.
     461// This static function is called by the kcm_remote_free() function.
     462// It can be called by any thread running in any cluster.
    470463// It releases a previously allocated block to the relevant kcm_page.
    471464// It changes the kcm_page status as required.
     
    481474                                                             void       * block_ptr )
    482475{
    483     uint32_t max    = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );
    484476    uint32_t order  = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    485477    uint32_t count  = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    486478    uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
    487     uint32_t size   = 1 << order;
    488479   
    489         // compute block index from block pointer
    490         uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;
     480        // compute block index from block pointer and kcm_page pointer
     481        uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order;
    491482
    492483    // compute mask in bit vector
     
    497488        printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n",
    498489        __FUNCTION__, kcm_cxy, block_ptr, kcm_ptr, kcm_page );
    499         printk("   status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );
    500490        kcm_remote_display( kcm_cxy , kcm_ptr );
    501491        return;
     
    506496        hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count  ) , count - 1 );
    507497
    508         // change the page list if page was full
    509         if( count == max )
     498        // switch the page to active if page was full
     499        if( count == 63 )
    510500        {
    511501                list_remote_unlink( kcm_cxy , &kcm_page->list );
     
    516506        }
    517507
    518 #if (DEBUG_KCM_REMOTE & 1)
    519 thread_t * this  = CURRENT_THREAD;
    520 uint32_t   cycle = (uint32_t)hal_get_cycles();
    521 if( DEBUG_KCM_REMOTE < cycle )
    522 printk("\n[%s] thread[%x,%x] block %x / page %x / cluster %x / size %x / count %d\n",
    523 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 )
    524 #endif
    525 
    526508}  // end kcm_remote_put_block()
    527509
    528510/////////////////////////////////////////////////////////////////////////////////////
    529 // This private static function can be called by any thread running in any cluster.
     511// This static function can be called by any thread running in any cluster.
    530512// It gets one non-full KCM page from the remote KCM.
    531513// It allocates a page from remote PPM to populate the freelist, and initialises
     
    545527    else                            // allocate a new page from PPM
    546528        {
    547         // get one 4 Kbytes page from remote PPM
    548         xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 );
    549 
     529        // get KCM order
     530        uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ));
     531
     532        // get one kcm_page from PPM
     533        xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy,
     534                                                 order + 6 - CONFIG_PPM_PAGE_ORDER );
    550535            if( page_xp == XPTR_NULL )
    551536            {
    552                     printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
    553                 __FUNCTION__ , kcm_cxy );
    554 
     537
     538#if DEBUG_KCM_ERROR
     539printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     540__FUNCTION__ , kcm_cxy );
     541#endif
    555542                    return NULL;
    556543        }
     
    585572    void       * block_ptr;
    586573
    587     if( order < 6 ) order = 6;
    588 
    589 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order );
    590 
    591     // get local pointer on relevant KCM allocator
     574// check kcm_cxy argument
     575assert( __FUNCTION__, cluster_is_active( kcm_cxy ),
     576"cluster %x not active", kcm_cxy );
     577
     578// check order argument
     579assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) ,
     580"order argument %d too large", order );
     581
     582    // smallest size is a cache line
     583    if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER;
     584
     585    // get local pointer on relevant KCM allocator (same in all clusters)
    592586    kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6];
    593587
     
    607601        }
    608602
     603#if DEBUG_KCM
     604uint32_t cycle     = (uint32_t)hal_get_cycles();
     605uint32_t nb_full   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ));
     606uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ));
     607uint64_t status    = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ));
     608uint32_t count     = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ));
     609#endif
     610
     611
     612#if DEBUG_KCM
     613if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     614printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     615"    page %x / status [%x,%x] / count %d\n",
     616__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      617kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     618#endif
     619
    609620        // get a block from selected active page
    610621        block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page );
    611622
     623#if DEBUG_KCM
     624if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     625printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     626"    page %x / status [%x,%x] / count %d\n",
     627__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      628kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     629#endif
     630
    612631        // release lock
    613632        remote_busylock_release( lock_xp );
    614633
    615 #if DEBUG_KCM_REMOTE
    616 thread_t * this  = CURRENT_THREAD;
    617 uint32_t   cycle = (uint32_t)hal_get_cycles();
    618 if( DEBUG_KCM_REMOTE < cycle )
    619 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n",
    620 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
    621 #endif
    622 
    623634        return block_ptr;
    624635
    625636}  // end kcm_remote_alloc()
    626637
    627 /////////////////////////////////////
    628 void kcm_remote_free( cxy_t  kcm_cxy,
    629                       void * block_ptr )
     638////////////////////////////////////////
     639void kcm_remote_free( cxy_t     kcm_cxy,
     640                      void    * block_ptr,
     641                      uint32_t  order )
    630642{
    631643        kcm_t      * kcm_ptr;
    632644        kcm_page_t * kcm_page;
    633645
    634 // check argument
    635 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" );
    636 
    637     // get local pointer on remote KCM page
    638         kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK);
    639 
    640     // get local pointer on remote KCM
    641         kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) );
     646// check kcm_cxy argument
     647assert( __FUNCTION__, cluster_is_active( kcm_cxy ),
     648"cluster %x not active", kcm_cxy );
     649
     650// check block_ptr argument
     651assert( __FUNCTION__, (block_ptr != NULL),
     652"block pointer cannot be NULL" );
     653
     654// check order argument
     655assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) ,
     656"order argument %d too large", order );
     657
     658    // smallest block size is a cache line
     659    if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER;
     660
     661    // get local pointer on relevant KCM allocator (same in all clusters)
     662    kcm_ptr = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER];
     663
     664    // get local pointer on KCM page
     665    intptr_t kcm_page_mask = (1 << (order + 6)) - 1;
     666        kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~kcm_page_mask);
     667
     668#if DEBUG_KCM
     669uint32_t cycle     = (uint32_t)hal_get_cycles();
     670uint32_t nb_full   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ));
     671uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ));
     672uint64_t status    = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ));
     673uint32_t count     = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ));
     674#endif
    642675
    643676    // build extended pointer on remote KCM lock
     
    647680        remote_busylock_acquire( lock_xp );
    648681
    649         // release block
     682#if DEBUG_KCM
     683if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     684printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     685"    page %x / status [%x,%x] / count %d\n",
     686__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      687kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     688#endif
     689
     690        // release the block to the relevant page
    650691        kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr );
     692
     693#if DEBUG_KCM
     694if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) )
     695printk("\n[%s] exit  / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n"
     696"    page %x / status [%x,%x] / count %d\n",
     697__FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active,
      698kcm_page, (uint32_t)(status>>32), (uint32_t)(status), count );
     699#endif
    651700
    652701        // release lock
    653702        remote_busylock_release( lock_xp );
    654 
    655 #if DEBUG_KCM_REMOTE
    656 thread_t * this  = CURRENT_THREAD;
    657 uint32_t   cycle = (uint32_t)hal_get_cycles();
    658 uint32_t   order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
    659 if( DEBUG_KCM_REMOTE < cycle )
    660 printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n",
    661 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );
    662 #endif
    663703
    664704}  // end kcm_remote_free
     
    673713    uint32_t       count;
    674714
     715    // get pointers on TXT0 chdev
     716    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     717    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     718    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     719
     720    // get extended pointer on remote TXT0 chdev lock
     721    xptr_t    txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     722
     723    // get TXT0 lock
     724    remote_busylock_acquire( txt0_lock_xp );
     725
    675726    uint32_t order           = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) );
    676727    uint32_t full_pages_nr   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
    677728    uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );
    678729
    679         printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
     730        nolock_printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
    680731        kcm_cxy, order, full_pages_nr, active_pages_nr );
    681732
     
    688739            count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    689740
    690             printk("- active page %x / status (%x,%x) / count %d\n",
    691             kcm_page, GET_CXY( status ), GET_PTR( status ), count );
     741            nolock_printk("- active page %x / status (%x,%x) / count %d\n",
      742            kcm_page, (uint32_t)( status >> 32 ), (uint32_t)( status ), count );
    692743        }
    693744    }
     
    701752            count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
    702753
    703             printk("- full page %x / status (%x,%x) / count %d\n",
    704             kcm_page, GET_CXY( status ), GET_PTR( status ), count );
     754            nolock_printk("- full page %x / status (%x,%x) / count %d\n",
      755            kcm_page, (uint32_t)( status >> 32 ), (uint32_t)( status ), count );
    705756        }
    706757    }
     758
     759    // release TXT0 lock
     760    remote_busylock_release( txt0_lock_xp );
     761
    707762}  // end kcm_remote_display()
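
The allocation logic above tracks the 64 blocks of a kcm_page in a 64-bit status bit-vector: block 0 holds the kcm_page descriptor, so bits 1 to 63 describe the 63 allocatable blocks, and a page becomes "full" when count reaches 63. The following minimal sketch (illustrative names and types, not the kernel's actual declarations) summarizes the bookkeeping performed by kcm_get_block() and kcm_put_block():

    #include <stdint.h>

    typedef struct kcm_page_sketch_s
    {
        uint64_t status;   // bit <i> set <=> block <i> is allocated
        uint32_t count;    // number of allocated blocks (0 to 63)
    }
    kcm_page_sketch_t;

    // returns the allocated block index (1 to 63) / returns 0 if the page is full
    static uint32_t sketch_get_block( kcm_page_sketch_t * page )
    {
        uint32_t index;
        for( index = 1 ; index <= 63 ; index++ )
        {
            uint64_t mask = 1ULL << index;
            if( (page->status & mask) == 0 )    // free block found
            {
                page->status |= mask;
                page->count++;
                return index;    // block address is page_base + (index << order)
            }
        }
        return 0;
    }

    static void sketch_put_block( kcm_page_sketch_t * page,
                                  uint32_t            index )
    {
        page->status &= ~(1ULL << index);
        page->count--;
    }
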
  • trunk/kernel/mm/kcm.h

    r672 r683  
    3232#include <kmem.h>
    3333
    34 
    35 #define KCM_PAGE_FULL     0
    36 #define KCM_PAGE_EMPTY    1
    37 #define KCM_PAGE_ACTIVE   2
    38 
    3934/****************************************************************************************
    40  * This structure defines a generic Kernel Cache Manager, that is a block allocator,
    41  * for fixed size objects. It exists in each cluster a specific KCM allocator for
    42  * the following block sizes: 64, 128, 256, 512, 1024, 2048 bytes.
    43  * These six KCM allocators are initialized by the cluster_init() function.
     35 * This structure defines a generic Kernel Cache Manager, a fixed size block allocator.
     36 * It returns an aligned block whose size is a power of 2, not smaller than a cache line,
     37 * but smaller than a small PPM page. It exists in each cluster a specific KCM allocator
     38 * for each possible block size. When the cache line contains 64 bytes and the page
     39 * contains 4K bytes, the possible block sizes are 64, 128, 256, 512, 1024, 2048 bytes.
     40 * These KCM allocators are initialized by the cluster_init() function.
    4441 *
    45  * Each KCM cache is implemented as a set o 4 Kbytes pages. A kcm_page is split in slots,
    46  * where each slot can contain one block. in each kcm_page, the first slot (that cannot
    47  * be smaller than 64 bytes) contains the kcm page descriptor, defined below
     42 * Each KCM cache is implemented as a set of "kcm_pages": a "kcm_page" is an aligned
     43 * buffer in physical memory (allocated by the PPM allocator) such as :
     44 *       buffer_size = block_size * 64  <=>  buffer_order = block_order + 6.
     45 *
      46 * A kcm_page always contains 64 kcm_blocks, but the first block (that cannot be smaller
     47 * than 64 bytes) is used to store the kcm_page descriptor defining the page allocation
     48 * status, and cannot be allocated to store data.
     49 *
     50 * A KCM cache is extensible, as new kcm_pages are dynamically allocated from the PPM
     51 * allocator when required. For a given KCM cache the set of kcm_pages is split in two
     52 * lists: the list of "full" pages (containing 63 allocated blocks), and the list of
     53 * "active" pages (containing at least one free block). An "empty" page (containing
     54 * only free blocks) is considered active, and is not released to PPM.
    4855 *
    4956 * To allow any thread running in any cluster to directly access the KCM of any cluster,
     
    6269
    6370        uint32_t             order;            /*! ln( block_size )                        */
    64         uint32_t             max_blocks;       /*! max number of blocks per page           */
    6571}
    6672kcm_t;
     
    8490        list_entry_t        list;              /*! [active / busy / free] list member      */
    8591        kcm_t             * kcm;               /*! pointer on kcm allocator                */
    86         page_t            * page;              /*! pointer on the physical page descriptor */
     92        page_t            * page;              /*! pointer on physical page descriptor    */
    8793}
    8894kcm_page_t;
     
    120126 ****************************************************************************************
    121127 * @ block_ptr   : local pointer on the released block.
     128 * @ order       : log2( block_size in bytes ).
    122129 ***************************************************************************************/
    123 void kcm_free( void    * block_ptr );
     130void kcm_free( void    * block_ptr,
     131               uint32_t  order );
    124132
    125133
     
    143151 * @ kcm_cxy     : remote KCM cluster identifier.
    144152 * @ block_ptr   : local pointer on the released buffer in remote cluster.
     153 * @ order       : log2( block_size in bytes ).
    145154 ***************************************************************************************/
    146155void kcm_remote_free( cxy_t     kcm_cxy,
    147                       void    * block_ptr );
     156                      void    * block_ptr,
     157                      uint32_t  order );
    148158
    149159/****************************************************************************************
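
The header comment above fixes the kcm_page geometry: buffer_size = block_size * 64, i.e. buffer_order = block_order + 6. A short worked check of the two derived quantities used in kcm.c, assuming the values quoted in the comment (4 Kbytes pages, 64-byte cache lines); the constants below are illustrative, not taken from the kernel configuration:

    #include <stdint.h>

    #define CONFIG_PPM_PAGE_ORDER      12    // 4 Kbytes pages
    #define CONFIG_CACHE_LINE_ORDER     6    // 64-byte cache lines

    // PPM order requested when a new kcm_page must be allocated:
    //   block order  6 (  64 bytes) -> kcm_page =   4 Kbytes -> ppm order 0 (one page)
    //   block order 11 (2 Kbytes)   -> kcm_page = 128 Kbytes -> ppm order 5 (32 pages)
    static inline uint32_t kcm_ppm_order( uint32_t block_order )
    {
        return block_order + 6 - CONFIG_PPM_PAGE_ORDER;
    }

    // mask used by kcm_free() to recover the kcm_page base from a block pointer
    static inline intptr_t kcm_page_mask( uint32_t block_order )
    {
        return ((intptr_t)1 << (block_order + 6)) - 1;
    }
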
  • trunk/kernel/mm/khm.c

    r672 r683  
    4040{
    4141        // check config parameters
    42         assert( __FUNCTION__, ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32 ) ,
     42        assert( __FUNCTION__, ((CONFIG_PPM_PAGE_ORDER + CONFIG_PPM_HEAP_ORDER) < 32 ) ,
    4343                 "CONFIG_PPM_HEAP_ORDER too large" );
    4444
     
    4747
    4848        // compute kernel heap size
    49         intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_SHIFT;
     49        intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ORDER;
    5050
    5151        // get kernel heap base from PPM
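
The renamed constant leaves the khm_init() arithmetic unchanged; with illustrative values (4 Kbytes pages and a hypothetical CONFIG_PPM_HEAP_ORDER of 10) the computation above gives:

    #include <stdint.h>

    #define CONFIG_PPM_PAGE_ORDER   12    // illustrative
    #define CONFIG_PPM_HEAP_ORDER   10    // hypothetical value for this example

    // heap_size = (1 << 10) << 12 = 4 Mbytes ;
    // the assert in khm_init() guarantees 10 + 12 < 32, so the shift cannot overflow
    static inline intptr_t khm_heap_size( void )
    {
        return ((intptr_t)1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ORDER;
    }
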
  • trunk/kernel/mm/kmem.c

    r672 r683  
    22 * kmem.c - kernel memory allocator implementation.
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019,2020)
     4 * Authors  Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2929#include <thread.h>
    3030#include <memcpy.h>
    31 #include <khm.h>
    3231#include <ppm.h>
    3332#include <kcm.h>
     
    3534#include <kmem.h>
    3635
    37 /////////////////////////////////////
    38 void * kmem_alloc( kmem_req_t * req )
    39 {
    40         uint32_t    type;    // KMEM_PPM / KMEM_KCM / KMEM_KHM
    41         uint32_t    flags;   // AF_NONE / AF_ZERO / AF_KERNEL
    42         uint32_t    order;   // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes
    43 
    44         type  = req->type;
    45         order = req->order;
    46         flags = req->flags;
    47 
    48     //////////////////////
    49         if( type == KMEM_PPM )
    50         {
    51                 // allocate the number of requested pages
    52                 page_t * page_ptr = (void *)ppm_alloc_pages( order );
    53 
    54                 if( page_ptr == NULL )
    55                 {
    56                         printk("\n[ERROR] in %s : PPM failed / order %d / cluster %x\n",
    57                         __FUNCTION__ , order , local_cxy );
    58                         return NULL;
    59                 }
    60 
    61         xptr_t page_xp = XPTR( local_cxy , page_ptr );
    62 
    63                 // reset page if requested
    64                 if( flags & AF_ZERO ) page_zero( page_ptr );
    65 
    66         // get pointer on buffer from the page descriptor
    67         void * ptr = GET_PTR( ppm_page2base( page_xp ) );
    68 
    69 #if DEBUG_KMEM
     36///////////////////////////////////
     37void * kmem_alloc( uint32_t  order,
     38                   uint32_t  flags )
     39{
     40
     41#if DEBUG_KMEM || DEBUG_KMEM_ERROR
    7042thread_t * this  = CURRENT_THREAD;
    7143uint32_t   cycle = (uint32_t)hal_get_cycles();
    72 if( DEBUG_KMEM < cycle )
    73 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
    74 __FUNCTION__, this->process->pid, this->trdid,
    75 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle );
     44#endif
     45
     46        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     47        {
     48                // allocate memory from PPM
     49                page_t * page = (void *)ppm_alloc_pages( order - CONFIG_PPM_PAGE_ORDER );
     50
     51                if( page == NULL )
     52                {
     53
     54#if DEBUG_KMEM_ERROR
     55if (DEBUG_KMEM_ERROR < cycle)
     56printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / order %d / cluster %x / cycle %d\n",
     57__FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle );
     58#endif
     59                        return NULL;
     60                }
     61
     62                // reset page if requested
     63                if( flags & AF_ZERO ) page_zero( page );
     64
     65        // get pointer on buffer from the page descriptor
     66        xptr_t page_xp = XPTR( local_cxy , page );
     67        void * ptr     = GET_PTR( ppm_page2base( page_xp ) );
     68
     69#if DEBUG_KMEM
     70if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     71printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n",
     72__FUNCTION__, this->process->pid, this->trdid,
     73order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle );
    7674#endif
    7775        return ptr;
    7876        }
    79     ///////////////////////////
    80         else if( type == KMEM_KCM )
     77        else                                     // use KCM
    8178        {
    8279                // allocate memory from KCM
     
    8582                if( ptr == NULL )
    8683                {
    87                         printk("\n[ERROR] in %s : KCM failed / order %d / cluster %x\n",
    88                     __FUNCTION__ , order , local_cxy );
     84
     85#if DEBUG_KMEM_ERROR
     86if (DEBUG_KMEM_ERROR < cycle)
     87printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n",
     88__FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle );
     89#endif
    8990                        return NULL;
    9091                }
     
    9495
    9596#if DEBUG_KMEM
    96 thread_t * this  = CURRENT_THREAD;
    97 uint32_t   cycle = (uint32_t)hal_get_cycles();
    98 if( DEBUG_KMEM < cycle )
    99 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n",
    100 __FUNCTION__, this->process->pid, this->trdid,
    101 1<<order, ptr, local_cxy, cycle );
     97if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     98printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n",
     99__FUNCTION__, this->process->pid, this->trdid,
     100order, ptr, local_cxy, cycle );
    102101#endif
    103102        return ptr;
    104103        }
    105     ///////////////////////////
    106         else if( type == KMEM_KHM )
    107         {
    108                 // allocate memory from KHM
    109                 void * ptr = khm_alloc( &LOCAL_CLUSTER->khm , order );
    110 
    111                 if( ptr == NULL )
    112                 {
    113                         printk("\n[ERROR] in %s : KHM failed / order %d / cluster %x\n",
    114                         __FUNCTION__ , order , local_cxy );
    115                         return NULL;
    116                 }
    117 
    118                 // reset memory if requested
    119                 if( flags & AF_ZERO ) memset( ptr , 0 , order );
    120 
    121 #if DEBUG_KMEM
    122 thread_t * this  = CURRENT_THREAD;
    123 uint32_t   cycle = (uint32_t)hal_get_cycles();
    124 if( DEBUG_KMEM < cycle )
    125 printk("\n[%s] thread[%x,%x] from KHM / %d bytes / base %x / cxy %x / cycle %d\n",
    126 __FUNCTION__, this->process->pid, this->trdid,
    127 order, ptr, local_cxy, cycle );
    128 #endif
    129         return ptr;
    130         }
    131     else
    132     {
    133         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    134         return NULL;
    135     }
    136104}  // end kmem_alloc()
    137105
    138 //////////////////////////////////
    139 void kmem_free( kmem_req_t * req )
    140 {
    141     uint32_t type = req->type;
    142 
    143     //////////////////////
    144         if( type == KMEM_PPM )
    145         {
    146         page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , req->ptr ) ) );
     106//////////////////////////////
     107void kmem_free( void    * ptr,
     108                uint32_t  order )
     109{
     110        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     111        {
     112        page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , ptr ) ) );
    147113
    148114        ppm_free_pages( page );
    149115    }
    150     ///////////////////////////
    151     else if( type == KMEM_KCM )
     116        else                                     // use KCM
    152117    {
    153         kcm_free( req->ptr );
    154         }
    155     ///////////////////////////
    156     else if( type == KMEM_KHM )
    157     {
    158         khm_free( req->ptr );
    159     }
    160     else
    161     {
    162         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    163     }
     118        kcm_free( ptr , order );
     119        }
    164120}  // end kmem_free()
    165121
    166 ///////////////////////////////////////////
    167 void * kmem_remote_alloc( cxy_t        cxy,
    168                           kmem_req_t * req )
    169 {
    170         uint32_t    type;    // KMEM_PPM / KMEM_KCM / KMEM_KHM
    171         uint32_t    flags;   // AF_ZERO / AF_KERNEL / AF_NONE
    172         uint32_t    order;   // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes
    173 
    174         type  = req->type;
    175         order = req->order;
    176         flags = req->flags;
    177 
    178         //////////////////////
    179         if( type == KMEM_PPM )
    180         {
    181                 // allocate the number of requested pages from remote cluster
    182                 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );
     122
     123
     124////////////////////////////////////////
     125void * kmem_remote_alloc( cxy_t     cxy,
     126                          uint32_t  order,
     127                          uint32_t  flags )
     128{
     129
     130#if DEBUG_KMEM || DEBUG_KMEM_ERROR
     131thread_t * this = CURRENT_THREAD;
     132uint32_t   cycle = (uint32_t)hal_get_cycles();
     133#endif
     134
     135        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     136        {
     137                // allocate memory from PPM
     138                xptr_t page_xp = ppm_remote_alloc_pages( cxy , order - CONFIG_PPM_PAGE_ORDER );
    183139
    184140                if( page_xp == XPTR_NULL )
    185141                {
    186                         printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n",
    187                         __FUNCTION__ , order , cxy );
     142
     143#if DEBUG_KMEM_ERROR
     144if( DEBUG_KMEM_ERROR < cycle )
     145printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / order %d / cluster %x / cycle %d\n",
     146__FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle );
     147#endif
    188148                        return NULL;
    189149                }
     
    192152        xptr_t base_xp = ppm_page2base( page_xp );
    193153
    194                 // reset page if requested
    195                 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
    196 
    197 
    198 #if DEBUG_KMEM_REMOTE
    199 thread_t * this = CURRENT_THREAD;
    200 uint32_t   cycle = (uint32_t)hal_get_cycles();
    201 if( DEBUG_KMEM_REMOTE < cycle )
    202 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
    203 __FUNCTION__, this->process->pid, this->trdid,
    204 1<<order, ppm_page2ppn( page_xp ), cxy, cycle );
     154                // reset memory if requested
     155                if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , 1<<order );
     156
     157#if DEBUG_KMEM
     158if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     159printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n",
     160__FUNCTION__, this->process->pid, this->trdid,
     161order, ppm_page2ppn( page_xp ), cxy, cycle );
    205162#endif
    206163        return GET_PTR( base_xp );
    207164        }
    208     ///////////////////////////
    209         else if( type == KMEM_KCM )
     165        else                                     // use KCM
    210166        {
    211167                // allocate memory from KCM
     
    214170                if( ptr == NULL )
    215171                {
    216                         printk("\n[ERROR] in %s : failed for KCM / order %d in cluster %x\n",
    217                     __FUNCTION__ , order , cxy );
     172
     173#if DEBUG_KMEM_ERROR
     174if( DEBUG_KMEM_ERROR < cycle )
     175printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n",
     176__FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle );
     177#endif
    218178                        return NULL;
    219179                }
     
    222182                if( flags & AF_ZERO )  hal_remote_memset( XPTR( cxy , ptr ) , 0 , 1<<order );
    223183
    224 #if DEBUG_KMEM_REMOTE
    225 thread_t * this = CURRENT_THREAD;
    226 uint32_t   cycle = (uint32_t)hal_get_cycles();
    227 if( DEBUG_KMEM_REMOTE < cycle )
    228 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n",
    229 __FUNCTION__, this->process->pid, this->trdid,
    230 1<<order, ptr, cxy, cycle );
     184#if DEBUG_KMEM
     185if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) )
     186printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n",
     187__FUNCTION__, this->process->pid, this->trdid,
     188order, ptr, cxy, cycle );
    231189#endif
    232190        return ptr;
    233191        }
    234         ///////////////////////////
    235         else if( type == KMEM_KHM )               
    236         {
    237         printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__  );
    238                 return NULL;
    239         }
    240     else
    241     {
    242         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    243         return NULL;
    244     }
    245192}  // end kmem_remote_alloc()
    246193
    247 ////////////////////////////////////////
    248 void kmem_remote_free( cxy_t        cxy,
    249                        kmem_req_t * req )
    250 {
    251     uint32_t type = req->type;
    252 
    253     //////////////////////
    254         if( type == KMEM_PPM )
    255         {
    256         page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , req->ptr ) ) );
     194/////////////////////////////////////
     195void kmem_remote_free( cxy_t     cxy,
     196                       void    * ptr,
     197                       uint32_t  order )
     198{
     199        if( order >= CONFIG_PPM_PAGE_ORDER )     // use PPM
     200        {
     201        page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , ptr ) ) );
    257202
    258203        ppm_remote_free_pages( cxy , page );
    259204    }
    260     ///////////////////////////
    261     else if( type == KMEM_KCM )
     205        else                                     // use KCM
    262206    {
    263         kcm_remote_free( cxy , req->ptr );
    264         }
    265     ///////////////////////////
    266     else if( type == KMEM_KHM )
    267     {
    268         printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ );
    269     }
    270     else
    271     {
    272         printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);
    273     }
     207        kcm_remote_free( cxy , ptr , order );
     208        }
    274209}  // end kmem_remote_free()
    275210
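
The net effect of this rewrite is visible at the call sites: the kmem_req_t descriptor disappears, and the allocator (PPM or KCM) is selected from the order alone. A caller-side sketch, assuming 4 Kbytes pages (CONFIG_PPM_PAGE_ORDER = 12) and the kmem_alloc() / kmem_free() prototypes shown above; the sizes are chosen for illustration:

    static void kmem_usage_sketch( void )
    {
        // order 9 (512 bytes) < CONFIG_PPM_PAGE_ORDER : served by the KCM[9] cache
        void * small = kmem_alloc( 9 , AF_ZERO );

        // order 13 (8 Kbytes) >= CONFIG_PPM_PAGE_ORDER : served by PPM (two pages)
        void * big = kmem_alloc( 13 , AF_ZERO );

        // the order must now be supplied again on release
        if( small != NULL ) kmem_free( small , 9 );
        if( big   != NULL ) kmem_free( big   , 13 );
    }
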
  • trunk/kernel/mm/kmem.h

    r656 r683  
    11/*
    2  * kmem.h - kernel unified memory allocator interface
     2 * kmem.h - unified kernel memory allocator definition
    33 *
    4  * Authors  Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner     (2016,2017,2018,2019,2020)
    55 *
    66 * Copyright (c) UPMC Sorbonne Universites
     
    2929
    3030/*************************************************************************************
    31  * This enum defines the three Kernel Memory Allocaror types
    32  ************************************************************************************/
    33 
    34 enum
    35 {
    36     KMEM_PPM              = 0,   /*! PPM allocator                                  */
    37     KMEM_KCM              = 1,   /*! KCM allocator                                  */
    38     KMEM_KHM              = 2,   /*! KHM allocator                                  */
    39 };
    40 
    41 /*************************************************************************************
    4231 * This defines the generic Allocation Flags that can be associated to
    4332 * a Kernel Memory Request.
     
    4534
    4635#define AF_NONE       0x0000   // no attributes
    47 #define AF_KERNEL     0x0001   // for kernel use
    48 #define AF_ZERO       0x0002   // must be reset to 0
    49 
    50 /*************************************************************************************
    51  * This structure defines a Kernel Memory Request.
    52  ************************************************************************************/
    53 
    54 typedef struct kmem_req_s
    55 {
    56     uint32_t      type;   /*! KMEM_PPM / KMEM_KCM / KMEM_KHM                        */
    57     uint32_t      order;  /*! PPM: ln2(pages) / KCM: ln2(bytes) / KHM: bytes        */
    58     uint32_t      flags;  /*! request attributes                                    */
    59     void        * ptr;    /*! local pointer on allocated buffer (only used by free) */
    60 }
    61 kmem_req_t;
     36#define AF_KERNEL     0x0001   // for kernel use ???
     37#define AF_ZERO       0x0002   // data buffer must be reset to 0
    6238
    6339/*************************************************************************************
    6440 * These two functions allocate physical memory in a local or remote cluster
    65  * as specified by the kmem_req_t request descriptor, and return a local pointer
    66  * on the allocated buffer. It uses three specialised physical memory allocators:
    67  * - PPM (Physical Pages Manager) allocates N contiguous small physical pages.
    68  *       N is a power of 2, and req.order = ln(N). Implement the buddy algorithm.
    69  * - KCM (Kernel Cache Manager) allocates aligned blocks of M bytes from a cache.
    70  *       M is a power of 2, and req.order = ln( M ). One cache per block size.
    71  * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes,
    72  *       M can have any value, and req.order = M.
    73  *
    74  * WARNING: the physical memory allocated with a given allocator type must be
    75  *          released using the same allocator type.
     41 * as specified by the <cxy>, <order> and <flags> arguments, and return a local
     42 * pointer on the allocated buffer. The buffer size (in bytes) is a power of 2,
     43 * equal to (1 << order) bytes. It can be initialized to zero if requested.
     44 * Depending on the <order> value, it uses two specialised allocators:
     45 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages
     46 *   Manager) allocates 2**(order - PPM_PAGE_ORDER) contiguous small physical pages.
     47 *   This allocator implements the buddy algorithm.
     48 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager)
     49 *   allocates an aligned block of 2**order bytes from specialised KCM[ORDER] caches
     50 *  (one KCM cache per block size). 
    7651 *************************************************************************************
    77  * @ cxy   : target cluster identifier for a remote access.
    78  * @ req   : local pointer on allocation request.
      52 * @ cxy    : [in] target cluster identifier for a remote access.
     53 * @ order  : [in] ln( block size in bytes).
     54 * @ flags  : [in] allocation flags defined above.
    7955 * @ return local pointer on allocated buffer if success / return NULL if no memory.
    8056 ************************************************************************************/
    81 void * kmem_alloc( kmem_req_t * req );
     57void * kmem_alloc( uint32_t  order,
     58                   uint32_t  flags );
    8259
    83 void * kmem_remote_alloc( cxy_t        cxy,
    84                           kmem_req_t * req );
     60void * kmem_remote_alloc( cxy_t     cxy,
     61                          uint32_t  order,
     62                          uint32_t  flags );
    8563
    8664/*************************************************************************************
    87  * These two functions release previously allocated physical memory, as specified
    88  * by the <type> and <ptr> fields of the kmem_req_t request descriptor.
     65 * These two functions release a previously allocated physical memory block,
     66 * as specified by the <cxy>, <order> and <ptr> arguments.
     67 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages
     68 *   Manager) releases 2**(order - PPM_PAGE_ORDER) contiguous small physical pages.
     69 *   This allocator implements the buddy algorithm.
     70 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager)
      71 *   releases the block of 2**order bytes to the specialised KCM[order] cache.
    8972 *************************************************************************************
    90  * @ cxy   : target cluster identifier for a remote access.
    91  * @ req : local pointer to request descriptor.
     73 * @ cxy    : [in] target cluster identifier for a remote access.
     74 * @ ptr    : [in] local pointer to released block.
     75 * @ order  : [in] ln( block size in bytes ).
    9276 ************************************************************************************/
    93 void  kmem_free ( kmem_req_t * req );
     77void  kmem_free( void    * ptr,
     78                 uint32_t  order );
    9479
    95 void  kmem_remote_free( cxy_t        cxy,
    96                         kmem_req_t * req );
     80void  kmem_remote_free( cxy_t     cxy,
     81                        void    * ptr,
     82                        uint32_t  order );
    9783
    9884
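For illustration, a minimal usage sketch of this new order-based interface (not part of the changeset; it assumes the kernel headers above and CONFIG_PPM_PAGE_ORDER == 12, i.e. 4 Kbytes pages):

    // a 64 bytes block comes from the KCM[6] cache (6 < CONFIG_PPM_PAGE_ORDER),
    // an 8 Kbytes buffer comes from the PPM buddy allocator (order 13)
    void * small = kmem_alloc( 6  , AF_ZERO );   // zeroed 64 bytes block
    void * pages = kmem_alloc( 13 , AF_NONE );   // 2 contiguous physical pages

    // a block must be released with the same order used for its allocation
    if( small != NULL ) kmem_free( small , 6  );
    if( pages != NULL ) kmem_free( pages , 13 );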
  • trunk/kernel/mm/mapper.c

    r672 r683  
    33 *
    44 * Authors   Mohamed Lamine Karaoui (2015)
    5  *           Alain Greiner (2016,2017,2018,2019,2020)
     5 *           Alain Greiner          (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c)  UPMC Sorbonne Universites
     
    5151{
    5252    mapper_t * mapper_ptr;
    53     kmem_req_t req;
    5453    error_t    error;
    5554
    5655    // allocate memory for mapper descriptor
    57     req.type    = KMEM_KCM;
    58     req.order   = bits_log2( sizeof(mapper_t) );
    59     req.flags   = AF_KERNEL | AF_ZERO;
    60     mapper_ptr  = kmem_remote_alloc( cxy , &req );
     56    mapper_ptr  = kmem_remote_alloc( cxy , bits_log2(sizeof(mapper_t)) , AF_ZERO );
    6157
    6258    if( mapper_ptr == NULL )
    6359    {
    64         printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
     60
     61#if DEBUG_MAPPER_ERROR
     62printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
     63#endif
    6564        return XPTR_NULL;
    6665    }
     
    7776    if( error )
    7877    {
    79         printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
    80         req.type  = KMEM_KCM;
    81         req.ptr   = mapper_ptr;
    82         kmem_remote_free( cxy , &req );
     78
     79#if DEBUG_MAPPER_ERROR
     80printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
     81kmem_remote_free( cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) );
     82#endif
    8383        return XPTR_NULL;
    8484    }
     
    104104    uint32_t   found_index = 0;
    105105    uint32_t   start_index = 0;
    106     kmem_req_t req;
    107106
    108107    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
     
    137136
    138137    // release memory for mapper descriptor
    139     req.type = KMEM_KCM;
    140     req.ptr  = mapper_ptr;
    141     kmem_remote_free( mapper_cxy , &req );
     138    kmem_remote_free( mapper_cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) );
    142139
    143140}  // end mapper_destroy()
     
    153150    uint32_t   inode_type = 0;
    154151
    155     thread_t * this = CURRENT_THREAD;
     152#if DEBUG_MAPPER_HANDLE_MISS || DEBUG_MAPPER_ERROR
     153thread_t * this  = CURRENT_THREAD;
     154uint32_t   cycle = (uint32_t)hal_get_cycles();
     155#endif
    156156
    157157    // get target mapper cluster and local pointer
     
    170170
    171171#if DEBUG_MAPPER_HANDLE_MISS
    172 uint32_t      cycle = (uint32_t)hal_get_cycles();
    173172char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    174173if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
     
    185184#endif
    186185
    187 #if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     186#if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    188187if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    189188{
     
    193192#endif
    194193
    195     // allocate one 4 Kbytes page from the remote mapper cluster
    196     xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
     194    // allocate one 4 Kbytes page in the remote mapper cluster
     195    void * base_ptr = kmem_remote_alloc( mapper_cxy , 12 , AF_NONE );
     196
     197    if( base_ptr == NULL )
     198    {
     199
     200#if DEBUG_MAPPER_ERROR
     201printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x / cycle %d\n",
     202__FUNCTION__ , this->process->pid, this->trdid , mapper_cxy , cycle );
     203#endif
     204        return -1;
     205    }
     206
      207    // get pointers on the allocated page descriptor
     208    xptr_t   page_xp  = ppm_base2page( XPTR( mapper_cxy , base_ptr ) );
    197209    page_t * page_ptr = GET_PTR( page_xp );
    198                            
    199     if( page_xp == XPTR_NULL )
    200     {
    201         printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
    202         __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );
    203         return -1;
    204     }
    205210
    206211    // initialize the page descriptor
     
    217222                                 page_id,
    218223                                 page_ptr );
    219 
    220224    if( error )
    221225    {
    222         printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
    223         __FUNCTION__ , this->process->pid, this->trdid );
    224         ppm_remote_free_pages( mapper_cxy , page_ptr );
     226
     227#if DEBUG_MAPPER_ERROR
     228printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper / cycle %d\n",
     229__FUNCTION__ , this->process->pid, this->trdid , cycle );
     230ppm_remote_free_pages( mapper_cxy , page_ptr );
     231#endif
    225232        return -1;
    226233    }
     
    236243        if( error )
    237244        {
    238             printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
    239             __FUNCTION__ , this->process->pid, this->trdid );
    240             mapper_remote_release_page( mapper_xp , page_ptr );
     245
     246#if DEBUG_MAPPER_ERROR
     247printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device / cycle %d\n",
     248__FUNCTION__ , this->process->pid, this->trdid , cycle );
     249mapper_remote_release_page( mapper_xp , page_ptr );
     250#endif
    241251            return -1;
    242252         }
     
    260270#endif
    261271
    262 #if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     272#if( DEBUG_MAPPER_HANDLE_MISS & 1 )
    263273if( DEBUG_MAPPER_HANDLE_MISS < cycle )
    264274{
     
    299309#endif
    300310
    301 #if( DEBUG_MAPPER_GET_PAGE & 2 )
     311#if( DEBUG_MAPPER_GET_PAGE & 1 )
    302312if( DEBUG_MAPPER_GET_PAGE < cycle )
    303313ppm_remote_display( local_cxy );
     
    336346            if( error )
    337347            {
    338                 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
    339                 __FUNCTION__ , this->process->pid, this->trdid );
    340                 remote_rwlock_wr_release( lock_xp );
     348
     349#if DEBUG_MAPPER_ERROR
     350printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
     351__FUNCTION__ , this->process->pid, this->trdid );
     352remote_rwlock_wr_release( lock_xp );
     353#endif
    341354                return XPTR_NULL;
    342355            }
     
    364377#endif
    365378
    366 #if( DEBUG_MAPPER_GET_PAGE & 2)
     379#if( DEBUG_MAPPER_GET_PAGE & 1)
    367380if( DEBUG_MAPPER_GET_PAGE < cycle )
    368381ppm_remote_display( local_cxy );
     
    432445            if( error )
    433446            {
    434                 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
    435                 __FUNCTION__ , this->process->pid, this->trdid );
    436                 remote_rwlock_wr_release( lock_xp );
     447
     448#if DEBUG_MAPPER_ERROR
     449printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
     450__FUNCTION__ , this->process->pid, this->trdid );
     451remote_rwlock_wr_release( lock_xp );
     452#endif
    437453                return XPTR_NULL;
    438454            }
     
    460476#endif
    461477
    462 #if( DEBUG_MAPPER_GET_FAT_PAGE & 2)
     478#if( DEBUG_MAPPER_GET_FAT_PAGE & 1)
    463479if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
    464480ppm_remote_display( local_cxy );
     
    532548
    533549    // compute indexes of pages for first and last byte in mapper
    534     uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    535     uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
     550    uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER;
     551    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_ORDER;
    536552
    537553#if (DEBUG_MAPPER_MOVE_USER & 1)
     
    668684
    669685    // compute indexes for first and last pages in mapper
    670     uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    671     uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
     686    uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER;
     687    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_ORDER;
    672688
    673689    // compute source and destination clusters
     
    853869            if( error )
    854870            {
    855                 printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n",
    856                 __FUNCTION__, page_ptr->index );
     871
     872#if DEBUG_MAPPER_SYNC
      873printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
     874__FUNCTION__, page_ptr->index );
     875#endif
    857876                return -1;
    858877            }
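Taken together, the hunks above replace the direct PPM call in mapper_handle_miss() by the generic allocator; the resulting pattern can be sketched as follows (assuming 4 Kbytes pages, so order 12):

    // allocate one page in the mapper cluster, then recover its page descriptor
    void * base_ptr = kmem_remote_alloc( mapper_cxy , 12 , AF_NONE );
    if( base_ptr != NULL )
    {
        xptr_t   page_xp  = ppm_base2page( XPTR( mapper_cxy , base_ptr ) );
        page_t * page_ptr = GET_PTR( page_xp );
        // page_ptr can now be registered in the mapper radix tree
    }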
  • trunk/kernel/mm/mapper.h

    r657 r683  
    3939/*******************************************************************************************
    4040 * This mapper_t object implements the kernel cache for a given VFS file or directory.
    41  * There is one mapper per file/dir. It is implemented as a three levels radix tree,
    42  * entirely stored in the same cluster as the inode representing the file/dir.
     41 * There is one mapper per file/dir.
     42 * - It is implemented as a three levels radix tree, entirely stored in the same cluster
     43 *   as the inode representing the file/directory.
    4344 * - The fast retrieval key is the page index in the file.
    4445 *   The ix1_width, ix2_width, ix3_width sub-indexes are configuration parameters.
    4546 * - The leaves are pointers on physical page descriptors, dynamically allocated
    46  *   in the local cluster.
     47 *   in the same cluster as the radix tree.
    4748 * - The mapper is protected by a "remote_rwlock", to support several simultaneous
    4849 *   "readers", and only one "writer".
     
    6061 *   buffer, that can be physically located in any cluster.
    6162 * - In the present implementation the cache size for a given file increases on demand,
    62  *   and the  allocated memory is only released when the mapper/inode is destroyed.
      63 *   and the allocated memory is only released when the inode is destroyed.
     64 *
     65 * WARNING : This mapper implementation makes the assumption that the PPM page size
     66 *           is 4 Kbytes. This code should be modified to support a generic page size,
     67 *           defined by the CONFIG_PPM_PAGE_SIZE parameter.
    6368 ******************************************************************************************/
    6469
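As a concrete illustration of the fast retrieval key described above, the page index used throughout mapper.c is derived from the byte offset in the file as follows (a sketch, valid under the 4 Kbytes page assumption stated in the WARNING):

    // radix tree key and offset in page, for a given byte offset in the file
    uint32_t page_id  = file_offset >> CONFIG_PPM_PAGE_ORDER;  // retrieval key
    uint32_t page_off = file_offset &  CONFIG_PPM_PAGE_MASK;   // offset in page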
  • trunk/kernel/mm/page.h

    r656 r683  
    33 *
    44 * Authors Ghassan Almalles (2008,2009,2010,2011,2012)
    5  *         Alain Greiner    (2016,2017,2018,2019)
     5 *         Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/mm/ppm.c

    r672 r683  
    6060
    6161   void   * base_ptr = ppm->vaddr_base +
    62                        ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
     62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER);
    6363
    6464        return XPTR( page_cxy , base_ptr );
     
    7575
    7676        page_t * page_ptr = ppm->pages_tbl +
    77                         ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
     77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ORDER);
    7878
    7979        return XPTR( base_cxy , page_ptr );
     
    9191    page_t * page_ptr = GET_PTR( page_xp );
    9292
    93     paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
    94 
    95     return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
     93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER );
     94
     95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);
    9696
    9797}  // end hal_page2ppn()
     
    102102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
    103103
    104     paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
     104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;
    105105
    106106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
    107107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
    108108
    109     return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
     109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ORDER] );
    110110
    111111}  // end hal_ppn2page
     
    118118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
    119119   
    120     paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
     120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER;
    121121
    122122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
     
    137137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
    138138
    139     return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
     139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);
    140140
    141141}  // end ppm_base2ppn()
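All these conversion functions rely on the same arithmetic: a physical address and a PPN differ by a shift of CONFIG_PPM_PAGE_ORDER bits, and a page descriptor index is obtained the same way from an offset in pages_tbl. A worked numeric sketch (assuming 4 Kbytes pages, order 12):

    // ppn 0x12345 <-> paddr 0x12345000 when CONFIG_PPM_PAGE_ORDER == 12
    paddr_t paddr = ((paddr_t)0x12345) << CONFIG_PPM_PAGE_ORDER;  // 0x12345000
    ppn_t   ppn   = (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER);      // 0x12345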
     
    159159
    160160assert( __FUNCTION__, !page_is_flag( page , PG_FREE ) ,
    161 "page already released : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
     161"page already released : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
    162162
    163163assert( __FUNCTION__, !page_is_flag( page , PG_RESERVED ) ,
    164 "reserved page : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
     164"reserved page : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
    165165
    166166        // set FREE flag in released page descriptor
     
    214214        page_t   * found_block; 
    215215
    216     thread_t * this = CURRENT_THREAD;
    217 
    218216        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
    219217
    220 #if DEBUG_PPM_ALLOC_PAGES
    221 uint32_t cycle = (uint32_t)hal_get_cycles();
     218#if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR
     219thread_t * this  = CURRENT_THREAD;
     220uint32_t   cycle = (uint32_t)hal_get_cycles();
    222221#endif
    223222
     
    232231
    233232// check order
    234 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
     233assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) ,
     234"illegal order argument = %d" , order );
    235235
    236236    //build extended pointer on lock protecting remote PPM
     
    273273        if( current_block == NULL ) // return failure if no free block found
    274274        {
    275                 // release lock protecting free lists
     275
     276#if DEBUG_PPM_ERROR
      277printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
     278__FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
     279#endif
     280        // release lock protecting free lists
    276281                remote_busylock_release( lock_xp );
    277 
    278         printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
    279         __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
    280 
    281282                return NULL;
    282283        }
     
    385386    page_t   * found_block;
    386387
    387     thread_t * this  = CURRENT_THREAD;
    388 
    389388// check order
    390 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
     389assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) ,
     390"illegal order argument = %d" , order );
    391391
    392392    // get local pointer on PPM (same in all clusters)
    393393        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    394394
    395 #if DEBUG_PPM_REMOTE_ALLOC_PAGES
     395#if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR
     396thread_t * this  = CURRENT_THREAD;
    396397uint32_t   cycle = (uint32_t)hal_get_cycles();
    397398#endif
    398399
    399 #if DEBUG_PPM_REMOTE_ALLOC_PAGES
    400 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
     400#if DEBUG_PPM_ALLOC_PAGES
     401if( DEBUG_PPM_ALLOC_PAGES < cycle )
    401402{
    402403    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    403404    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
    404     if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     405    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
    405406}
    406407#endif
     
    445446        if( current_block == NULL ) // return failure
    446447        {
     448
     449#if DEBUG_PPM_ERROR
      450printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
     451__FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
     452#endif
    447453                // release lock protecting free lists
    448454                remote_busylock_release( lock_xp );
    449 
    450         printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
    451         __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
    452 
    453455                return XPTR_NULL;
    454456        }
     
    489491    hal_fence();
    490492
    491 #if DEBUG_PPM_REMOTE_ALLOC_PAGES
    492 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
     493#if DEBUG_PPM_ALLOC_PAGES
     494if( DEBUG_PPM_ALLOC_PAGES < cycle )
    493495{
    494496    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
    495497    __FUNCTION__, this->process->pid, this->trdid,
    496498    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
    497     if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     499    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
    498500}
    499501#endif
     
    521523    uint32_t   order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
    522524
    523 #if DEBUG_PPM_REMOTE_FREE_PAGES
     525#if DEBUG_PPM_FREE_PAGES
    524526thread_t * this  = CURRENT_THREAD;
    525527uint32_t   cycle = (uint32_t)hal_get_cycles();
     
    527529#endif
    528530
    529 #if DEBUG_PPM_REMOTE_FREE_PAGES
    530 if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
     531#if DEBUG_PPM_FREE_PAGES
     532if( DEBUG_PPM_FREE_PAGES < cycle )
    531533{
    532534    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    533535    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
    534     if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
     536    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    535537}
    536538#endif
     
    549551
    550552assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_FREE ) ,
    551 "page already released : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
     553"page already released : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
    552554
    553555assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_RESERVED ) ,
    554 "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
     556"reserved page : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
    555557
    556558        // set the FREE flag in released page descriptor
     
    607609    hal_fence();
    608610
    609 #if DEBUG_PPM_REMOTE_FREE_PAGES
    610 if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
     611#if DEBUG_PPM_FREE_PAGES
     612if( DEBUG_PPM_FREE_PAGES < cycle )
    611613{
    612614    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
    613615    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
    614     if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
     616    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    615617}
    616618#endif
  • trunk/kernel/mm/ppm.h

    r656 r683  
    33 *
    44 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *          Alain Greiner    (2016,2017,2018,2019)
     5 *          Alain Greiner    (2016,2017,2018,2019,2020)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    5757 * the "buddy" algorithm.
    5858 * The local threads can access these free_lists by calling the ppm_alloc_pages() and
    59  * ppm_free_page() functions, but the remote threads can access the same free lists,
     59 * ppm_free_page() functions, and the remote threads can access the same free lists,
    6060 * by calling the ppm_remote_alloc_pages() and ppm_remote_free_pages() functions.
    6161 * Therefore, these free lists are protected by a remote_busy_lock.
     
    9898 * physical pages. It takes the lock protecting the free_lists before register the
    9999 * released page in the relevant free_list.
    100  * In normal use, you do not need to call it directly, as the recommended way to free
     100 * In normal use, it should not be called directly, as the recommended way to free
    101101 * physical pages is to call the generic allocator defined in kmem.h.
    102102 *****************************************************************************************
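Concretely, the recommended path for kernel code is a single call to the generic allocator, which dispatches to the PPM when the order reaches CONFIG_PPM_PAGE_ORDER (a sketch of the recommended call, matching its use in vmm.c below):

    // get one zeroed physical page in cluster <cxy> through the generic allocator
    void * base = kmem_remote_alloc( cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );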
  • trunk/kernel/mm/vmm.c

    r672 r683  
    11/*
    2  * vmm.c - virtual memory manager related operations definition.
     2 * vmm.c - virtual memory manager related operations implementation.
    33 *
    44 * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
     
    8989
    9090// check ltid argument
    91 assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
     91assert( __FUNCTION__,
     92(ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)),
    9293"slot index %d too large for an user stack vseg", ltid );
    9394
     
    107108    if( vseg == NULL )
    108109        {
    109         // release lock protecting free lists
     110 
     111#if DEBUG_VMM_ERROR
     112printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
     113__FUNCTION__ , local_cxy );
     114#endif
    110115        busylock_release( &mgr->lock );
    111 
    112         printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
    113         __FUNCTION__ , local_cxy );
    114 
    115116        return NULL;
    116117    }
     
    346347    if( current_vseg == NULL )  // return failure
    347348    {
    348         // release lock protecting free lists
     349
     350#if DEBUG_VMM_ERROR
      351printk("\n[ERROR] %s cannot allocate %d page(s) in cluster %x\n",
     352__FUNCTION__, npages , local_cxy );
     353#endif
    349354        busylock_release( &mgr->lock );
    350 
    351         printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n",
    352         __FUNCTION__, npages , local_cxy );
    353 
    354355        return NULL;
    355356    }
     
    368369            if( new_vseg == NULL )
    369370        {
    370                 // release lock protecting free lists
     371
     372#if DEBUG_VMM_ERROR
     373printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
     374__FUNCTION__ , local_cxy );
     375#endif
    371376            busylock_release( &mgr->lock );
    372 
    373             printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",
    374             __FUNCTION__ , local_cxy );
    375 
    376377            return NULL;
    377378            }
     
    517518                    XPTR( local_cxy , &vseg->xlist ) );
    518519
    519 }  // end vmm_attach_vseg_from_vsl()
     520}  // end vmm_attach_vseg_to_vsl()
    520521
    521522////////////////////////////////////////////////////////////////////////////////////////////
     
    537538    xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
    538539
    539 }  // end vmm_detach_from_vsl()
     540}  // end vmm_detach_vseg_from_vsl()
    540541
    541542////////////////////////////////////////////
     
    12901291            if( child_vseg == NULL )   // release all allocated vsegs
    12911292            {
     1293
     1294#if DEBUG_VMM_ERROR
     1295printk("\n[ERROR] in %s : cannot create vseg for child in cluster %x\n",
     1296__FUNCTION__, local_cxy );
     1297#endif
    12921298                vmm_destroy( child_process );
    1293                 printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );
    12941299                return -1;
    12951300            }
     
    13381343                    if( error )
    13391344                    {
     1345
     1346#if DEBUG_VMM_ERROR
     1347printk("\n[ERROR] in %s : cannot copy GPT\n",
     1348__FUNCTION__ );
     1349#endif
    13401350                        vmm_destroy( child_process );
    1341                         printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );
    13421351                        return -1;
    13431352                    }
     
    13571366    remote_queuelock_release( parent_lock_xp );
    13581367
    1359 /* deprecated [AG] : this is already done by the vmm_user_init() funcfion
    1360 
    1361     // initialize the child VMM STACK allocator
    1362     vmm_stack_init( child_vmm );
    1363 
    1364     // initialize the child VMM MMAP allocator
    1365     vmm_mmap_init( child_vmm );
    1366 
    1367     // initialize instrumentation counters
    1368         child_vmm->false_pgfault_nr    = 0;
    1369         child_vmm->local_pgfault_nr    = 0;
    1370         child_vmm->global_pgfault_nr   = 0;
    1371         child_vmm->false_pgfault_cost  = 0;
    1372         child_vmm->local_pgfault_cost  = 0;
    1373         child_vmm->global_pgfault_cost = 0;
    1374 */
    13751368    // copy base addresses from parent VMM to child VMM
    13761369    child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
     
    15641557        if( vseg == NULL )
    15651558        {
    1566             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1567             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1559
     1560#if DEBUG_VMM_ERROR
     1561printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1562__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1563#endif
    15681564            return NULL;
    15691565        }
     
    15721568        vseg->type = type;
    15731569        vseg->vmm  = vmm;
    1574         vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
    1575         vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
     1570        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
     1571        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
    15761572        vseg->cxy  = cxy;
    15771573
     
    15821578    {
    15831579        // compute page index (in mapper) for first and last byte
    1584         vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_SHIFT;
    1585         vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1580        vpn_t    vpn_min    = file_offset >> CONFIG_PPM_PAGE_ORDER;
     1581        vpn_t    vpn_max    = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ORDER;
    15861582
    15871583        // compute offset in first page and number of pages
     
    15941590        if( vseg == NULL )
    15951591        {
    1596             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1597             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1592
     1593#if DEBUG_VMM_ERROR
     1594printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1595__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1596#endif
    15981597            return NULL;
    15991598        }
     
    16021601        vseg->type        = type;
    16031602        vseg->vmm         = vmm;
    1604         vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT) + offset;
     1603        vseg->min         = (vseg->vpn_base << CONFIG_PPM_PAGE_ORDER) + offset;
    16051604        vseg->max         = vseg->min + size;
    16061605        vseg->file_offset = file_offset;
     
    16151614    {
    16161615        // compute number of required pages in virtual space
    1617         vpn_t npages = size >> CONFIG_PPM_PAGE_SHIFT;
     1616        vpn_t npages = size >> CONFIG_PPM_PAGE_ORDER;
    16181617        if( size & CONFIG_PPM_PAGE_MASK) npages++;
    16191618       
     
    16231622        if( vseg == NULL )
    16241623        {
    1625             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1626             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1624
     1625#if DEBUG_VMM_ERROR
     1626printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1627__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1628#endif
    16271629            return NULL;
    16281630        }
     
    16311633        vseg->type = type;
    16321634        vseg->vmm  = vmm;
    1633         vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_SHIFT;
    1634         vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_SHIFT);
     1635        vseg->min  = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
     1636        vseg->max  = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);
    16351637        vseg->cxy  = cxy;
    16361638
     
    16401642    else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
    16411643    {
    1642         uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;
    1643         uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1644        uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ORDER;
     1645        uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ORDER;
    16441646
    16451647        // allocate vseg descriptor
     
    16481650            if( vseg == NULL )
    16491651            {
    1650             printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
    1651             __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1652
     1653#if DEBUG_VMM_ERROR
     1654printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n",
     1655__FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy );
     1656#endif
    16521657            return NULL;
    16531658            }
     1659
    16541660        // initialize vseg
    16551661        vseg->type        = type;
     
    16571663        vseg->min         = base;
    16581664        vseg->max         = base + size;
    1659         vseg->vpn_base    = base >> CONFIG_PPM_PAGE_SHIFT;
     1665        vseg->vpn_base    = base >> CONFIG_PPM_PAGE_ORDER;
    16601666        vseg->vpn_size    = vpn_max - vpn_min + 1;
    16611667        vseg->file_offset = file_offset;
     
    16721678    if( existing_vseg != NULL )
    16731679    {
    1674         printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
    1675                "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
    1676         __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size,
    1677         vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
     1680
     1681#if DEBUG_VMM_ERROR
     1682printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n"
     1683       "        overlap existing vseg %s [vpn_base %x / vpn_size %x]\n",
     1684__FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size,
     1685vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size );
     1686#endif
    16781687        vseg_free( vseg );
    16791688        return NULL;
     
    18011810    if( do_kmem_release )
    18021811    {
    1803         kmem_req_t req;
    1804         req.type = KMEM_PPM;
    1805         req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );
    1806 
    1807         kmem_remote_free( page_cxy , &req );
     1812        // get physical page order
     1813        uint32_t order = CONFIG_PPM_PAGE_ORDER +
     1814                         hal_remote_l32( XPTR( page_cxy , &page_ptr->order ));
     1815
     1816        // get physical page base
     1817        void * base = GET_PTR( ppm_ppn2base( ppn ) );
     1818
     1819        // release physical page
     1820        kmem_remote_free( page_cxy , base , order );
    18081821
    18091822#if DEBUG_VMM_PPN_RELEASE
     
    18551868#endif
    18561869
    1857     // loop on PTEs in GPT to unmap all mapped PTE
    1858         for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
      1870    // loop on PTEs in the GPT to unmap all mapped PTEs
     1871    for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    18591872    {
    18601873        // get ppn and attr
     
    19421955    intptr_t min          = new_base;
    19431956    intptr_t max          = new_base + new_size;
    1944     vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_SHIFT;
    1945     vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1957    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_ORDER;
     1958    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_ORDER;
    19461959
    19471960    // build extended pointer on GPT
     
    20822095        if( ref_cxy == local_cxy )    // local is ref => return error
    20832096        {
    2084             printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
    2085             __FUNCTION__, vaddr, process->pid );
    2086 
    2087             // release local VSL lock
     2097
     2098#if DEBUG_VMM_ERROR
     2099printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
     2100__FUNCTION__, vaddr, process->pid );
     2101#endif
    20882102            remote_queuelock_release( loc_lock_xp );
    2089 
    20902103            return -1;
    20912104        }
     
    21032116            if( ref_vseg == NULL )  // vseg not found => return error
    21042117            {
    2105                 // release both VSL locks
     2118
     2119#if DEBUG_VMM_ERROR
     2120printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
     2121__FUNCTION__, vaddr, process->pid );
     2122#endif
    21062123                remote_queuelock_release( loc_lock_xp );
    21072124                remote_queuelock_release( ref_lock_xp );
    2108 
    2109                 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
    2110                 __FUNCTION__, vaddr, process->pid );
    2111 
    21122125                return -1;
    21132126            }
     
    21192132                if( loc_vseg == NULL )   // no memory => return error
    21202133                {
    2121                     printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
    2122                     __FUNCTION__, vaddr, process->pid );
    2123 
    2124                     // release both VSL locks
     2134
     2135#if DEBUG_VMM_ERROR
     2136printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n",
     2137__FUNCTION__, vaddr, process->pid );
     2138#endif
    21252139                    remote_queuelock_release( ref_lock_xp );
    21262140                    remote_queuelock_release( loc_lock_xp );
    2127 
    21282141                    return -1;
    21292142                }
     
    21582171//////////////////////////////////////////////////////////////////////////////////////
    21592172// This static function computes the target cluster to allocate a physical page
    2160 // for a given <vpn> in a given <vseg>, allocates the page and returns an extended
    2161 // pointer on the allocated page descriptor.
     2173// for a given <vpn> in a given <vseg>, allocates the physical page from a local
     2174// or remote cluster (depending on the vseg type), and returns an extended pointer
     2175// on the allocated page descriptor.
    21622176// The vseg cannot have the FILE type.
    21632177//////////////////////////////////////////////////////////////////////////////////////
    21642178// @ vseg   : local pointer on vseg.
    21652179// @ vpn    : unmapped vpn.
    2166 // @ return an extended pointer on the allocated page descriptor.
     2180// @ return xptr on page descriptor if success / return XPTR_NULL if failure
    21672181//////////////////////////////////////////////////////////////////////////////////////
    21682182static xptr_t vmm_page_allocate( vseg_t * vseg,
     
    22072221    }
    22082222
    2209     // allocate one small physical page from target cluster
    2210     kmem_req_t req;
    2211     req.type  = KMEM_PPM;
    2212     req.order = 0;
    2213     req.flags = AF_ZERO;
    2214 
    22152223    // get local pointer on page base
    2216     void * ptr = kmem_remote_alloc( page_cxy , &req );
    2217 
     2224    void * ptr = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );
     2225
     2226    if( ptr == NULL )
     2227    {
     2228
     2229#if DEBUG_VMM_ERROR
     2230printk("\n[ERROR] in %s : cannot allocate memory from cluster %x\n",
     2231__FUNCTION__, page_cxy );
     2232#endif
     2233        return XPTR_NULL;
     2234    }     
    22182235    // get extended pointer on page descriptor
    22192236    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );
     
    22912308       
    22922309            // compute missing page offset in vseg
    2293             uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;
     2310            uint32_t offset = page_id << CONFIG_PPM_PAGE_ORDER;
    22942311
    22952312            // compute missing page offset in .elf file
     
    24272444    // get local vseg (access to reference VSL can be required)
    24282445    error = vmm_get_vseg( process,
    2429                           (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
     2446                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
    24302447                          &vseg );
    24312448    if( error )
     
    27522769    // get local vseg
    27532770    error = vmm_get_vseg( process,
    2754                           (intptr_t)vpn<<CONFIG_PPM_PAGE_SHIFT,
     2771                          (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER,
    27552772                          &vseg );
    27562773    if( error )
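The CONFIG_PPM_PAGE_SHIFT to CONFIG_PPM_PAGE_ORDER renaming in the hunks above is a renaming only: the address arithmetic is unchanged. For reference (a sketch):

    // virtual address <-> virtual page number conversions used in vmm.c
    vpn_t    vpn  = (vpn_t)( vaddr >> CONFIG_PPM_PAGE_ORDER );  // page number
    intptr_t base = (intptr_t)vpn << CONFIG_PPM_PAGE_ORDER;     // page base address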
  • trunk/kernel/mm/vseg.c

    r672 r683  
    6262vseg_t * vseg_alloc( void )
    6363{
    64     kmem_req_t   req;
    65 
    66     req.type  = KMEM_KCM;
    67         req.order = bits_log2( sizeof(vseg_t) );
    68         req.flags = AF_KERNEL | AF_ZERO;
    69 
    70     return kmem_alloc( &req );
     64    return (vseg_t*)kmem_alloc( bits_log2( sizeof(vseg_t)) , AF_ZERO );
    7165}
    7266
     
    7468void vseg_free( vseg_t * vseg )
    7569{
    76     kmem_req_t  req;
    77 
    78         req.type = KMEM_KCM;
    79         req.ptr  = vseg;
    80         kmem_free( &req );
     70        kmem_free( vseg , bits_log2( sizeof(vseg_t)) );
    8171}
    8272
  • trunk/kernel/mm/vseg.h

    r657 r683  
    8282    vpn_t             vpn_base;     /*! first page of vseg                                */
    8383    vpn_t             vpn_size;     /*! number of pages occupied                          */
     84    xptr_t            mapper_xp;    /*! xptr on remote mapper (for types CODE/DATA/FILE)  */
    8485    uint32_t          flags;        /*! vseg attributes                                   */
    85     xptr_t            mapper_xp;    /*! xptr on remote mapper (for types CODE/DATA/FILE)  */
    8686    intptr_t          file_offset;  /*! vseg offset in file (for types CODE/DATA/FILE)    */
    8787    intptr_t          file_size;    /*! max segment size in mapper (for type CODE/DATA)   */