source: trunk/kernel/mm/ppm.c @ 654

Last change on this file since 654 was 651, checked in by alain, 5 years ago

1) Improve the VMM MMAP allocator: implement the "buddy" algorithm
to allocate only aligned blocks.
2) Fix a bug in the pthread_join() / pthread_exit() mechanism.

File size: 25.4 KB
RevLine 
[1]1/*
[636]2 * ppm.c -  Physical Pages Manager implementation
[1]3 *
4 * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
[632]5 *          Alain Greiner    (2016,2017,2018,2019)
[1]6 *
7 * Copyright (c) UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
[1]11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
28#include <printk.h>
29#include <list.h>
30#include <bits.h>
31#include <page.h>
[585]32#include <dqdt.h>
[567]33#include <busylock.h>
34#include <queuelock.h>
[1]35#include <thread.h>
36#include <cluster.h>
37#include <kmem.h>
38#include <process.h>
[567]39#include <mapper.h>
[1]40#include <ppm.h>
[606]41#include <vfs.h>
[1]42
[567]43////////////////////////////////////////////////////////////////////////////////////////
[634]44//         global variables
45////////////////////////////////////////////////////////////////////////////////////////
46
47extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
48
49////////////////////////////////////////////////////////////////////////////////////////
[567]50//     functions to  translate [ page <-> base <-> ppn ]
51////////////////////////////////////////////////////////////////////////////////////////
52
[50]53/////////////////////////////////////////////
[315]54inline xptr_t ppm_page2base( xptr_t page_xp )
[1]55{
[315]56        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
[1]57
[315]58    cxy_t    page_cxy = GET_CXY( page_xp );
[437]59    page_t * page_ptr = GET_PTR( page_xp );
[315]60
[406]61   void   * base_ptr = ppm->vaddr_base + 
62                       ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
63
[315]64        return XPTR( page_cxy , base_ptr );
65
66} // end ppm_page2base()
67
68/////////////////////////////////////////////
69inline xptr_t ppm_base2page( xptr_t base_xp )
[1]70{
[315]71        ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
[1]72
[315]73    cxy_t    base_cxy = GET_CXY( base_xp );
[437]74    void   * base_ptr = GET_PTR( base_xp );
[315]75
76        page_t * page_ptr = ppm->pages_tbl + 
77                        ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_SHIFT);
78
79        return XPTR( base_cxy , page_ptr );
80
81}  // end ppm_base2page()
82
83
84
[50]85///////////////////////////////////////////
[315]86inline ppn_t ppm_page2ppn( xptr_t page_xp )
87{
88        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
89
90    cxy_t    page_cxy = GET_CXY( page_xp );
[437]91    page_t * page_ptr = GET_PTR( page_xp );
[315]92
93    paddr_t  paddr    = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT );
94
[437]95    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]96
97}  // end ppm_page2ppn()
98
99///////////////////////////////////////
100inline xptr_t ppm_ppn2page( ppn_t ppn )
101{
[437]102        ppm_t   * ppm  = &LOCAL_CLUSTER->ppm;
[315]103
[437]104    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]105
[437]106    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
107    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]108
[437]109    return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_SHIFT] );
[315]110
111}  // end ppm_ppn2page()
112
113
114
115///////////////////////////////////////
116inline xptr_t ppm_ppn2base( ppn_t ppn )
117{
[437]118        ppm_t  * ppm   = &LOCAL_CLUSTER->ppm;
[315]119   
[437]120    paddr_t  paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_SHIFT;
[315]121
[437]122    cxy_t    cxy   = CXY_FROM_PADDR( paddr );
123    lpa_t    lpa   = LPA_FROM_PADDR( paddr );
[315]124
[437]125        return XPTR( cxy , (void *)ppm->vaddr_base + lpa );
[315]126
127}  // end ppm_ppn2base()
128
129///////////////////////////////////////////
130inline ppn_t ppm_base2ppn( xptr_t base_xp )
131{
132        ppm_t  * ppm      = &LOCAL_CLUSTER->ppm;
133
134    cxy_t    base_cxy = GET_CXY( base_xp );
[437]135    void   * base_ptr = GET_PTR( base_xp );
[315]136
137    paddr_t  paddr    = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) );
138
[437]139    return (ppn_t)(paddr >> CONFIG_PPM_PAGE_SHIFT);
[315]140
141}  // end ppm_base2ppn()
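// The six translators above are mutual inverses : from any one of the three
// identifiers of a physical page (page descriptor, base address, ppn) the two
// others can be recovered. The sketch below only illustrates this invariant for
// a page descriptor owned by the local cluster; the helper name is illustrative
// and is not part of the PPM interface.
static void __attribute__((unused)) ppm_translate_example( page_t * page )
{
    xptr_t page_xp = XPTR( local_cxy , page );      // extended pointer on page descriptor
    xptr_t base_xp = ppm_page2base( page_xp );      // extended pointer on page base address
    ppn_t  ppn     = ppm_page2ppn ( page_xp );      // physical page number

    // each round trip must return the original identifier
    if( (ppm_base2page( base_xp ) != page_xp) ||
        (ppm_ppn2page ( ppn     ) != page_xp) ||
        (ppm_base2ppn ( base_xp ) != ppn    ) )
    {
        printk("\n[WARNING] in %s : ppm translators do not commute\n", __FUNCTION__ );
    }
}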
142
143
[567]144////////////////////////////////////////////////////////////////////////////////////////
145//     functions to  allocate / release  physical pages
146////////////////////////////////////////////////////////////////////////////////////////
[315]147
148///////////////////////////////////////////
[50]149void ppm_free_pages_nolock( page_t * page )
[1]150{
[636]151        page_t   * buddy;               // searched buddy page descriptor
152        uint32_t   buddy_index;         // buddy page index in page_tbl[]
153        page_t   * current;             // current (merged) page descriptor
154        uint32_t   current_index;       // current (merged) page index in page_tbl[]
155        uint32_t   current_order;       // current (merged) page order
[7]156
[160]157        ppm_t    * ppm         = &LOCAL_CLUSTER->ppm;
158        page_t   * pages_tbl   = ppm->pages_tbl;
[1]159
[632]160assert( !page_is_flag( page , PG_FREE ) ,
[636]161"page already released : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
[177]162
[632]163assert( !page_is_flag( page , PG_RESERVED ) ,
[636]164"reserved page : ppn = %x\n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );
[407]165
[636]166        // set FREE flag in released page descriptor
[1]167        page_set_flag( page , PG_FREE );
168
[636]169    // initialise loop variables
170    current       = page;
171    current_order = page->order;
172        current_index = page - ppm->pages_tbl;
173
[160]174        // search the buddy page descriptor
[636]175        // - merge with current page if buddy found
176        // - exit to release the current page when buddy not found
177    while( current_order < CONFIG_PPM_MAX_ORDER )
178    {
179        // compute buddy page index and page descriptor
[7]180                buddy_index = current_index ^ (1 << current_order);
181                buddy       = pages_tbl + buddy_index;
[636]182       
183        // exit loop if buddy not found in current free list
184                if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;
[18]185
[636]186        // remove buddy page from current free_list
[7]187                list_unlink( &buddy->list );
[1]188                ppm->free_pages_nr[current_order] --;
[18]189
[636]190        // reset order field in buddy page descriptor
[7]191                buddy->order = 0;
[632]192
[636]193                // compute next (merged) page index in page_tbl[]
[7]194                current_index &= buddy_index;
[18]195
[636]196        // compute next (merged) page order
197        current_order++;
198
199        // compute next (merged) page descriptor
200        current = pages_tbl + current_index; 
201    }
202
203        // update order field for merged page descriptor
[7]204        current->order = current_order;
[1]205
[636]206        // insert merged page in relevant free list
[7]207        list_add_first( &ppm->free_pages_root[current_order] , &current->list );
[1]208        ppm->free_pages_nr[current_order] ++;
209
[433]210}  // end ppm_free_pages_nolock()
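// Example of the buddy computation above : the buddy of a block is found by flipping
// one bit of its page index. For a released block of order 2 starting at page index
// 12 (binary 1100), the candidate buddy index is 12 ^ (1 << 2) = 8 (binary 1000).
// If the block starting at index 8 is free with the same order, the two blocks are
// merged into an order 3 block starting at index 12 & 8 = 8, and the loop retries
// one order higher.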
211
[636]212
[1]213////////////////////////////////////////////
214page_t * ppm_alloc_pages( uint32_t   order )
215{
[632]216        page_t   * current_block;
[160]217        uint32_t   current_order;
[1]218        uint32_t   current_size;
[632]219        page_t   * found_block; 
[551]220
[635]221    thread_t * this = CURRENT_THREAD;
222
[438]223#if DEBUG_PPM_ALLOC_PAGES
[433]224uint32_t cycle = (uint32_t)hal_get_cycles();
225#endif
[1]226
[636]227#if (DEBUG_PPM_ALLOC_PAGES & 1)
[438]228if( DEBUG_PPM_ALLOC_PAGES < cycle )
[636]229{
230    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
231    __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
232    ppm_remote_display( local_cxy );
233}
[433]234#endif
235
[160]236        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
[1]237
[611]238// check order
239assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
[1]240
[632]241    // build extended pointer on lock protecting the local PPM
242    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
[1]243
[160]244        // take lock protecting free lists
[632]245        remote_busylock_acquire( lock_xp );
[1]246
[632]247        current_block = NULL;
[635]248    current_order = order;
[632]249
[635]250        // search a free block of size equal to or larger than the requested size
251        while( current_order < CONFIG_PPM_MAX_ORDER )
[1]252        {
[635]253        // get local pointer on the root of relevant free_list (same in all clusters)
254        list_entry_t * root = &ppm->free_pages_root[current_order];
255
256                if( !list_is_empty( root ) )
[1]257                {
[632]258            // get first free block in this free_list
[635]259                        current_block = LIST_FIRST( root , page_t , list );
[632]260
261            // remove this block from this free_list
262                        list_unlink( &current_block->list );
[635]263                ppm->free_pages_nr[current_order] --;
[632]264
265            // register pointer on found block
266            found_block = current_block;
267
268            // compute found block size
269                current_size = (1 << current_order);
270
271                        break; 
[1]272                }
[635]273
274        // increment loop index
275        current_order++;
[1]276        }
277
[632]278        if( current_block == NULL ) // return failure if no free block found
[1]279        {
[160]280                // release lock protecting free lists
[632]281                remote_busylock_release( lock_xp );
[1]282
[635]283        printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
284        __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy );
[433]285
[160]286                return NULL;
287        }
[18]288
[1]289
[632]290        // split the found block in smaller sub-blocks if required
[160]291        // and update the free-lists accordingly
[1]292        while( current_order > order )
293        {
[635]294        // update size and order
[1]295                current_order --;
[635]296                current_size >>= 1;
[632]297
[651]298        // update order fields in new free block
[632]299                current_block = found_block + current_size;
300                current_block->order = current_order;
[18]301
[632]302        // insert new free block in relevant free_list
303                list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
[1]304                ppm->free_pages_nr[current_order] ++;
305        }
[18]306
[632]307        // update found block page descriptor
308        page_clear_flag( found_block , PG_FREE );
309        page_refcount_up( found_block );
310        found_block->order = order;
[1]311
[160]312        // release lock protecting free lists
[632]313        remote_busylock_release( lock_xp );
[18]314
[585]315    // update DQDT
[632]316    dqdt_increment_pages( local_cxy , order );
[585]317
[438]318#if DEBUG_PPM_ALLOC_PAGES
319if( DEBUG_PPM_ALLOC_PAGES < cycle )
[636]320{
321    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
322    __FUNCTION__, this->process->pid, this->trdid, 
323    1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
324    ppm_remote_display( local_cxy );
325}
[433]326#endif
[7]327
[632]328        return found_block;
[1]329
[433]330}  // end ppm_alloc_pages()
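// Example of the splitting loop above : a request of order 1 (two pages), when the
// smallest available free block has order 3 (eight pages, starting at page index 32),
// removes that block from free_pages_root[3], then releases its upper half (order 2,
// index 36) into free_pages_root[2] and the remaining upper quarter (order 1, index 34)
// into free_pages_root[1]. The caller receives the order 1 block starting at index 32.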
[1]331
332////////////////////////////////////
333void ppm_free_pages( page_t * page )
334{
335        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[18]336
[438]337#if DEBUG_PPM_FREE_PAGES
[632]338thread_t * this  = CURRENT_THREAD;
339uint32_t   cycle = (uint32_t)hal_get_cycles();
[433]340#endif
341
[636]342#if ( DEBUG_PPM_FREE_PAGES & 1 )
[438]343if( DEBUG_PPM_FREE_PAGES < cycle )
[636]344{
345    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
346    __FUNCTION__, this->process->pid, this->trdid, 
347    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
348    ppm_remote_display( local_cxy );
}
[433]349#endif
350
[632]351    //build extended pointer on lock protecting free_lists
352    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );
353
[160]354        // get lock protecting free_pages[] array
[632]355        remote_busylock_acquire( lock_xp );
[1]356
[18]357        ppm_free_pages_nolock( page );
[1]358
[632]359        // release lock protecting free_lists
360        remote_busylock_release( lock_xp );
[433]361
[585]362    // update DQDT
[632]363    dqdt_decrement_pages( local_cxy , page->order );
[585]364
[438]365#if DEBUG_PPM_FREE_PAGES
366if( DEBUG_PPM_FREE_PAGES < cycle )
[636]367{
368    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
369    __FUNCTION__, this->process->pid, this->trdid, 
370    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
371    ppm_remote_display( local_cxy );
372}
[433]373#endif
374
[567]375}  // end ppm_free_pages()
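// A minimal usage sketch for the local allocator defined above : allocate one small
// page in the local cluster, get its ppn, and release it. The helper name is
// illustrative and is not part of the PPM interface.
static void __attribute__((unused)) ppm_local_alloc_example( void )
{
    // allocate one physical page (order 0) in the local cluster
    page_t * page = ppm_alloc_pages( 0 );

    // allocation can fail when no free block is large enough
    if( page == NULL ) return;

    // get the physical page number of the allocated page
    ppn_t ppn = ppm_page2ppn( XPTR( local_cxy , page ) );

    printk("\n[%s] allocated ppn %x in cluster %x\n", __FUNCTION__, ppn, local_cxy );

    // release the page to the free lists
    ppm_free_pages( page );
}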
[1]376
[636]377
378
379
[632]380/////////////////////////////////////////////
[635]381void * ppm_remote_alloc_pages( cxy_t     cxy,
[632]382                               uint32_t  order )
[1]383{
[632]384        uint32_t   current_order;
385        uint32_t   current_size;
386    page_t   * current_block;   
387    page_t   * found_block;
388
[635]389    thread_t * this  = CURRENT_THREAD;
390
[634]391#if DEBUG_PPM_REMOTE_ALLOC_PAGES
[632]392uint32_t   cycle = (uint32_t)hal_get_cycles();
393#endif
394
[636]395#if ( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 )
[634]396if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[636]397{
398    printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
399    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
400    ppm_remote_display( cxy );
401}
[632]402#endif
403
404// check order
405assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
406
407    // get local pointer on PPM (same in all clusters)
408        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
409
410    //build extended pointer on lock protecting remote PPM
411    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
412
413        // take lock protecting free lists in remote cluster
414        remote_busylock_acquire( lock_xp );
415
416    current_block = NULL;   
[635]417    current_order = order;
[632]418
[635]419    // search a free block of size equal to or larger than the requested size
420    while( current_order < CONFIG_PPM_MAX_ORDER )
421    {
422        // get local pointer on the root of relevant free_list (same in all clusters)
[632]423        list_entry_t * root = &ppm->free_pages_root[current_order];
424
[635]425                if( !list_remote_is_empty( cxy , root ) )  // list non empty => success
[632]426                {
427            // get local pointer on first free page descriptor in remote cluster
428                        current_block = LIST_REMOTE_FIRST( cxy, root , page_t , list );
429
430            // remove first free page from the free-list in remote cluster
431                        list_remote_unlink( cxy , &current_block->list );
[635]432                hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
[632]433
434            // register found block
435            found_block = current_block;
436
437            // compute found block size
438                current_size = (1 << current_order);
439
440                        break;
441                }
[635]442
443        // increment loop index
444        current_order++;
[632]445        }
446
447        if( current_block == NULL ) // return failure
448        {
449                // release lock protecting free lists
450                remote_busylock_release( lock_xp );
451
[635]452        printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",
453        __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );
[632]454
455                return NULL;
456        }
457
458        // split the found block in smaller sub-blocks if required
459        // and update the free-lists accordingly in remote cluster
460        while( current_order > order )
461        {
[635]462        // update order and size
[632]463                current_order --;
464                current_size >>= 1;
465
466        // update new free block order field in remote cluster
[635]467                current_block = found_block + current_size;
[632]468                hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
469
470        // get local pointer on the root of the relevant free_list in remote cluster 
471        list_entry_t * root = &ppm->free_pages_root[current_order];
472
473        // insert new free block in this free_list
474                list_remote_add_first( cxy , root, &current_block->list );
475
476        // update free-list number of items in remote cluster
477        hal_remote_atomic_add( XPTR(cxy , &ppm->free_pages_nr[current_order]), 1 );
478        }
479
[634]480        // update refcount, flags and order fields in found block
[632]481        page_remote_clear_flag( XPTR( cxy , found_block ), PG_FREE );
482        page_remote_refcount_up( XPTR( cxy , found_block ) );
483        hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
484   
485        // release lock protecting free lists in remote cluster
486        remote_busylock_release( lock_xp );
487
488    // update DQDT page counter in remote cluster
489    dqdt_increment_pages( cxy , order );
490
[634]491#if DEBUG_PPM_REMOTE_ALLOC_PAGES
492if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
[636]493{
494    printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n",
495    __FUNCTION__, this->process->pid, this->trdid, 
496    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
497    ppm_remote_display( cxy );
498}
[632]499#endif
500
[635]501        return found_block;
[632]502
503}  // end ppm_remote_alloc_pages()
504
[636]505///////////////////////////////////////////////
506void ppm_remote_free_pages( cxy_t     page_cxy,
507                            page_t  * page_ptr )
[632]508{
509    xptr_t     page_xp;          // extended pointer on released page descriptor
[636]510        page_t   * buddy_ptr;        // searched buddy page descriptor
511    uint32_t   buddy_order;      // searched buddy page order
512        uint32_t   buddy_index;      // buddy page index in page_tbl[]
513        page_t   * current_ptr;      // current (merged) page descriptor
514        uint32_t   current_index;    // current (merged) page index in page_tbl[]
515        uint32_t   current_order;    // current (merged) page order
[632]516
[634]517#if DEBUG_PPM_REMOTE_FREE_PAGES
[632]518thread_t * this  = CURRENT_THREAD;
519uint32_t   cycle = (uint32_t)hal_get_cycles();
520#endif
521
[636]522#if ( DEBUG_PPM_REMOTE_FREE_PAGES & 1 )
[634]523if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[636]524{
525    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
526    __FUNCTION__, this->process->pid, this->trdid, 
527    1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr )), cycle );
528    ppm_remote_display( page_cxy );
529}
[632]530#endif
531
532    // build extended pointer on released page descriptor
[636]533    page_xp = XPTR( page_cxy , page_ptr );
[632]534   
535    // get local pointer on PPM (same in all clusters)
536        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
537
538    // build extended pointer on lock protecting remote PPM
[636]539    xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );
[632]540
541    // get local pointer on remote PPM page_tbl[] array
[636]542        page_t * pages_tbl = hal_remote_lpt( XPTR( page_cxy , &ppm->pages_tbl ) );
[632]543
544        // get lock protecting free_pages in remote cluster
545        remote_busylock_acquire( lock_xp );
546
547assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
[636]548"page already released : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
[632]549
550assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
[636]551"reserved page : ppn = %x\n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );
[632]552
[636]553        // set the FREE flag in released page descriptor
[632]554        page_remote_set_flag( page_xp , PG_FREE );
555
[636]556    // initialise loop variables
557    current_ptr   = page_ptr;
558    current_order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
559        current_index = page_ptr - ppm->pages_tbl;
560
[632]561        // search the buddy page descriptor
[636]562        // - merge with current page descriptor if buddy found
563        // - exit to release the current page descriptor if buddy not found
564    while( current_order < CONFIG_PPM_MAX_ORDER )
565    {
566        // compute buddy page index and local pointer on page descriptor
[632]567                buddy_index = current_index ^ (1 << current_order);
568                buddy_ptr   = pages_tbl + buddy_index;
[637]569
570        // get buddy order
571        buddy_order = hal_remote_l32( XPTR( page_cxy , &buddy_ptr->order ) );
[636]572       
573        // exit loop if buddy not found
574                if( !page_remote_is_flag( XPTR( page_cxy , buddy_ptr ) , PG_FREE ) || 
[632]575            (buddy_order != current_order) ) break;
576
[636]577        // remove buddy page from its free list in remote cluster
578                list_remote_unlink( page_cxy , &buddy_ptr->list );
579        hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , -1 );
[632]580
[636]581        // reset order field in buddy page descriptor
582        hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );
[632]583
[636]584                // compute next (merged) page index in page_tbl[]
[632]585                current_index &= buddy_index;
586
[636]587        // compute next (merged) page order
588        current_order++;
589
590        // compute next (merged) page descriptor
591        current_ptr = pages_tbl + current_index; 
592
593    }  // end loop on order
594
595        // update current (merged) page descriptor order field
[632]596        current_ptr = pages_tbl + current_index;
[636]597    hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );
[632]598
[636]599        // insert current (merged) page into relevant free list
600        list_remote_add_first( page_cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
601    hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );
[632]602
603        // release lock protecting free_pages[] array
604        remote_busylock_release( lock_xp );
605
606    // update DQDT
[636]607    dqdt_decrement_pages( page_cxy , page_ptr->order );
[632]608
[634]609#if DEBUG_PPM_REMOTE_FREE_PAGES
610if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
[636]611{
612    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
613    __FUNCTION__, this->process->pid, this->trdid, 
614    1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ), cycle );
615    ppm_remote_display( page_cxy );
616}
[632]617#endif
618
619}  // end ppm_remote_free_pages()
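// A minimal usage sketch for the remote allocator defined above, assuming <cxy>
// identifies an active cluster : allocate an order 2 block (four contiguous pages)
// in that cluster, then release it. The helper name is illustrative and is not part
// of the PPM interface.
static void __attribute__((unused)) ppm_remote_alloc_example( cxy_t cxy )
{
    // allocate four contiguous physical pages in the remote cluster
    page_t * page = ppm_remote_alloc_pages( cxy , 2 );

    // allocation can fail when no free block is large enough
    if( page == NULL ) return;

    // the returned pointer is a local pointer in cluster <cxy> : it must be
    // extended with the cluster identifier before being used
    ppn_t ppn = ppm_page2ppn( XPTR( cxy , page ) );

    printk("\n[%s] allocated ppn %x in cluster %x\n", __FUNCTION__, ppn, cxy );

    // release the block in the remote cluster
    ppm_remote_free_pages( cxy , page );
}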
620
621////////////////////////////////////
622void ppm_remote_display( cxy_t cxy )
623{
[1]624        uint32_t       order;
625        list_entry_t * iter;
[634]626    xptr_t         page_xp;
[1]627
[433]628    ppm_t * ppm = &LOCAL_CLUSTER->ppm;     // local pointer on PPM (same in all clusters)
629
[636]630    // get remote PPM general parameters
631    uint32_t   pages_nr   = hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) );
632    void     * vaddr_base = hal_remote_lpt( XPTR( cxy , &ppm->vaddr_base ) ); 
633    void     * pages_tbl  = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) ); 
634
[632]635    // build extended pointer on lock protecting remote PPM
[634]636    xptr_t ppm_lock_xp = XPTR( cxy , &ppm->free_lock );
[1]637
[634]638    // get pointers on TXT0 chdev
639    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
640    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
641    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
[632]642
[634]643    // build extended pointer on remote TXT0 lock
644    xptr_t  txt_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
[18]645
[634]646        // get PPM lock
647        remote_busylock_acquire( ppm_lock_xp );
648
649    // get TXT0 lock
650    remote_busylock_acquire( txt_lock_xp );
651
[636]652        nolock_printk("\n***** PPM in cluster %x / %d pages / page_tbl %x / vaddr_base %x\n",
653    cxy, pages_nr, pages_tbl, vaddr_base );
[634]654
[1]655        for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
656        {
[632]657        // get number of free pages for free_list[order] in remote cluster
658        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
[18]659
[636]660        // display direct free_list[order]
661                nolock_printk("- forward  : order = %d / n = %d\t: ", order , n );
[632]662                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
[1]663                {
[634]664            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
665                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
[1]666                }
[634]667                nolock_printk("\n");
[1]668        }
669
[634]670        // release TXT0 lock
671        remote_busylock_release( txt_lock_xp );
672
673        // release PPM lock
674        remote_busylock_release( ppm_lock_xp );
[160]675}
[1]676
[632]677////////////////////////////////
678error_t ppm_assert_order( void )
[1]679{
680        uint32_t       order;
681        list_entry_t * iter;
682        page_t       * page;
[18]683
[632]684    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
685
[407]686        for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
[1]687        {
688                if( list_is_empty( &ppm->free_pages_root[order] ) ) continue;
[18]689
[1]690                LIST_FOREACH( &ppm->free_pages_root[order] , iter )
691                {
692                        page = LIST_ELEMENT( iter , page_t , list );
[160]693                        if( page->order != order )  return -1;
[1]694                }
695        }
696
[160]697        return 0;
698}
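// ppm_assert_order() can be used as a cheap consistency check on the local free
// lists after a sequence of allocations and releases, for instance :
//
//     if( ppm_assert_order() ) printk("\n[PANIC] corrupted PPM free lists\n");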
[53]699
[567]700
701//////////////////////////////////////////////////////////////////////////////////////
702//     functions to handle  dirty physical pages
703//////////////////////////////////////////////////////////////////////////////////////
704
[606]705//////////////////////////////////////////
706bool_t ppm_page_do_dirty( xptr_t page_xp )
[567]707{
708        bool_t done = false;
709
[606]710    // get page cluster and local pointer
711    page_t * page_ptr = GET_PTR( page_xp );
712    cxy_t    page_cxy = GET_CXY( page_xp );
713
714    // get local pointer on PPM (same in all clusters)
[567]715        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
716
[606]717    // build extended pointers on page lock, page flags, and PPM dirty list lock
718    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );   
719    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
720    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
721           
722        // lock the remote PPM dirty_list
723        remote_queuelock_acquire( dirty_lock_xp );
[567]724
[606]725    // lock the remote page
726    remote_busylock_acquire( page_lock_xp );
727
728    // get remote page flags
729    uint32_t flags = hal_remote_l32( page_flags_xp );
730
731        if( (flags & PG_DIRTY) == 0 )
[567]732        {
733                // set dirty flag in page descriptor
[606]734        hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
[567]735
[632]736                // insert the page in the remote dirty list
737        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );
[606]738
[567]739                done = true;
740        }
741
[606]742    // unlock the remote page
743    remote_busylock_release( page_lock_xp );
[567]744
[606]745        // unlock the remote PPM dirty_list
746        remote_queuelock_release( dirty_lock_xp );
747
[567]748        return done;
749
[606]750} // end ppm_page_do_dirty()
751
752////////////////////////////////////////////
753bool_t ppm_page_undo_dirty( xptr_t page_xp )
[567]754{
755        bool_t done = false;
756
[606]757    // get page cluster and local pointer
758    page_t * page_ptr = GET_PTR( page_xp );
759    cxy_t    page_cxy = GET_CXY( page_xp );
760
761    // get local pointer on PPM (same in all clusters)
[567]762        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
763
[606]764    // build extended pointers on page lock, page flags, and PPM dirty list lock
765    xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock  );
766    xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
767    xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
768           
769        // lock the remote PPM dirty_list
770        remote_queuelock_acquire( dirty_lock_xp );
[567]771
[606]772    // lock the remote page
773    remote_busylock_acquire( page_lock_xp );
774
775    // get remote page flags
776    uint32_t flags = hal_remote_l32( page_flags_xp );
777
778        if( (flags & PG_DIRTY) )  // page is dirty
[567]779        {
[606]780                // reset dirty flag in page descriptor
781        hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
[567]782
[632]783        // remove the page from remote dirty list
784        list_remote_unlink( page_cxy , &page_ptr->list );
[606]785
[567]786                done = true;
787        }
788
[606]789    // unlock the remote page
790    remote_busylock_release( page_lock_xp );
[567]791
[606]792        // unlock the remote PPM dirty_list
793        remote_queuelock_release( dirty_lock_xp );
794
[567]795        return done;
796
[606]797}  // end ppm_page_undo_dirty()
798
799/////////////////////////////////
800void ppm_sync_dirty_pages( void )
[567]801{
[606]802        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
[567]803
[606]804    // get local pointer on PPM dirty_root
805    list_entry_t * dirty_root = &ppm->dirty_root;
806
807    // build extended pointer on PPM dirty_lock
808    xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );
809
[567]810        // get the PPM dirty_list lock
[606]811        remote_queuelock_acquire( dirty_lock_xp );
[567]812
813        while( !list_is_empty( &ppm->dirty_root ) )
814        {
[606]815                page_t * page = LIST_FIRST( dirty_root ,  page_t , list );
816        xptr_t   page_xp = XPTR( local_cxy , page );
[567]817
[606]818        // build extended pointer on page lock
819        xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );
820
[567]821                // get the page lock
[606]822                remote_busylock_acquire( page_lock_xp );
[567]823
824                // sync the page
[606]825                vfs_fs_move_page( page_xp , false );  // from mapper to device
[567]826
827                // release the page lock
[606]828                remote_busylock_release( page_lock_xp );
[567]829        }
830
831        // release the PPM dirty_list lock
[606]832        remote_queuelock_release( dirty_lock_xp );
[567]833
[606]834}  // end ppm_sync_dirty_pages()
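// A minimal usage sketch for the dirty pages interface above, for a page mapped in
// the local cluster : the page is registered in the local dirty list after being
// modified through its mapper, and the whole dirty list is later flushed to the
// backing device. The helper name is illustrative and is not part of the PPM interface.
static void __attribute__((unused)) ppm_dirty_example( page_t * page )
{
    // register the modified page in the local PPM dirty list
    // (returns false if the page was already registered)
    ppm_page_do_dirty( XPTR( local_cxy , page ) );

    // ... the page content is modified through its mapper ...

    // flush all pages registered in the local dirty list to the backing device
    ppm_sync_dirty_pages();
}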
835