source: trunk/kernel/mm/mapper.c @ 674

Last change on this file since 674 was 672, checked in by alain, 4 years ago

1) Introduce up to 4 command line arguments in the KSH "load" command.
These arguments are transferred to the user process through the
argc/argv mechanism, using the user space "args" vseg.

2) Introduce the named and anonymous "pipes", for inter-process communication
through the pipe() and mkfifo() syscalls.

3) Introduce the "chat" application to validate the two above mechanisms.

4) Improve the printk() and assert() functions in printk.c.

File size: 31.4 KB
RevLine 
[1]1/*
[606]2 * mapper.c - Kernel cache for FS files or directories implementation.
[1]3 *
4 * Authors   Mohamed Lamine Karaoui (2015)
[657]5 *           Alain Greiner (2016,2017,2018,2019,2020)
[1]6 *
7 * Copyright (c)  UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
[23]28#include <hal_uspace.h>
[656]29#include <hal_vmm.h>
[1]30#include <grdxt.h>
[614]31#include <string.h>
[1]32#include <rwlock.h>
33#include <printk.h>
[279]34#include <memcpy.h>
[1]35#include <thread.h>
36#include <core.h>
37#include <process.h>
38#include <kmem.h>
39#include <kcm.h>
[567]40#include <ppm.h>
[1]41#include <page.h>
42#include <cluster.h>
43#include <vfs.h>
44#include <mapper.h>
[614]45#include <dev_ioc.h>
[1]46
[567]47
[657]48/////////////////////////////////////
49xptr_t  mapper_create( cxy_t     cxy,
50                       uint32_t  type )
[1]51{
[657]52    mapper_t * mapper_ptr;
[1]53    kmem_req_t req;
54    error_t    error;
55
[635]56    // allocate memory for mapper descriptor
[657]57    req.type    = KMEM_KCM;
58    req.order   = bits_log2( sizeof(mapper_t) );
59    req.flags   = AF_KERNEL | AF_ZERO;
60    mapper_ptr  = kmem_remote_alloc( cxy , &req );
[1]61
[657]62    if( mapper_ptr == NULL )
[1]63    {
64        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
[657]65        return XPTR_NULL;
[1]66    }
67
[657]68    // initialize refcount and type
69    hal_remote_s32( XPTR( cxy , &mapper_ptr->refcount ) , 0 );
70    hal_remote_s32( XPTR( cxy , &mapper_ptr->fs_type )  , type );
[1]71
72    // initialize radix tree
[657]73    error = grdxt_remote_init( XPTR( cxy , &mapper_ptr->rt ),
74                               CONFIG_MAPPER_GRDXT_W1,
75                               CONFIG_MAPPER_GRDXT_W2,
76                               CONFIG_MAPPER_GRDXT_W3 );
[1]77    if( error )
78    {
79        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
[635]80        req.type  = KMEM_KCM;
[657]81        req.ptr   = mapper_ptr;
82        kmem_remote_free( cxy , &req );
83        return XPTR_NULL;
[1]84    }
85
86    // initialize mapper lock
[657]87    remote_rwlock_init( XPTR( cxy , &mapper_ptr->lock ) , LOCK_MAPPER_STATE );
[1]88
89    // initialize waiting threads xlist (empty)
[657]90    xlist_root_init( XPTR( cxy , &mapper_ptr->wait_root ) );
[1]91
92    // initialize vsegs xlist (empty)
[657]93    xlist_root_init( XPTR( cxy , &mapper_ptr->vsegs_root ) );
[1]94
[657]95    return XPTR( cxy , mapper_ptr );
[1]96
[204]97}  // end mapper_create()
98
[606]99////////////////////////////////////////
[657]100void mapper_destroy( xptr_t  mapper_xp )
[1]101{
[657]102    xptr_t     page_xp;
[1]103    page_t   * page;
104    uint32_t   found_index = 0;
105    uint32_t   start_index = 0;
106    kmem_req_t req;
107
[657]108    cxy_t      mapper_cxy = GET_CXY( mapper_xp ); 
109    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
110
111    // build extended pointer on radix tree
112    xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );
113
[606]114    // scan radix tree
[1]115    do
116    {
117        // get page from radix tree
[657]118        page_xp = grdxt_remote_get_first( rt_xp,
119                                          start_index , 
120                                          &found_index );
121        page = GET_PTR( page_xp );
122       
[606]123        // release registered pages to PPM
[18]124        if( page != NULL )
[1]125        {
126            // remove page from mapper and release to PPM
[657]127            mapper_remote_release_page( mapper_xp , page );
[1]128
129            // update start_key value for next page
130            start_index = found_index;
131        }
132    }
133    while( page != NULL );
134
[606]135    // release the memory allocated to radix tree itself
[657]136    grdxt_remote_destroy( rt_xp );
[1]137
138    // release memory for mapper descriptor
[635]139    req.type = KMEM_KCM;
[657]140    req.ptr  = mapper_ptr;
141    kmem_remote_free( mapper_cxy , &req );
[1]142
[204]143}  // end mapper_destroy()
144
[657]145/////////////////////////////////////////////////
146error_t mapper_handle_miss( xptr_t     mapper_xp,
147                            uint32_t   page_id,
148                            xptr_t   * page_xp_ptr )
[635]149{
150    error_t    error;
151
[657]152    uint32_t   inode_size = 0;   
153    uint32_t   inode_type = 0;
[656]154
[635]155    thread_t * this = CURRENT_THREAD;
156
157    // get target mapper cluster and local pointer
[656]158    cxy_t         mapper_cxy = GET_CXY( mapper_xp );
159    mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
[635]160
[656]161    // get inode pointer
162    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
163
164    // get inode size and type if relevant
165    if( inode != NULL )
166    {
167        inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
168        inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
169    }
170
[635]171#if DEBUG_MAPPER_HANDLE_MISS
172uint32_t      cycle = (uint32_t)hal_get_cycles();
173char          name[CONFIG_VFS_MAX_NAME_LENGTH];
174if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
175{
[656]176    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
177    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cxy %x / cycle %d\n",
[635]178    __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle );
179}
180if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
181{
[656]182    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cxy %x / cycle %d\n",
[635]183    __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle );
184}
185#endif
186
[656]187#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
188if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 
189{
190    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
191    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
192}
193#endif
194
[635]195    // allocate one 4 Kbytes page from the remote mapper cluster
[656]196    xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
197    page_t * page_ptr = GET_PTR( page_xp );
[635]198                           
[656]199    if( page_xp == XPTR_NULL )
[635]200    {
201        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
202        __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );
203        return -1;
204    }
205
206    // initialize the page descriptor
207    page_remote_init( page_xp );
208
[656]209    // initialize specific page descriptor fields
[635]210    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1          );
211    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index )    , page_id    );
212    hal_remote_spt( XPTR( mapper_cxy , &page_ptr->mapper )   , mapper_ptr );
213    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->flags )    , PG_INIT    );
214
215    // insert page in mapper radix tree
216    error = grdxt_remote_insert( XPTR( mapper_cxy , &mapper_ptr->rt),
217                                 page_id,
218                                 page_ptr );
219
220    if( error )
221    {
222        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
223        __FUNCTION__ , this->process->pid, this->trdid );
224        ppm_remote_free_pages( mapper_cxy , page_ptr );
225        return -1;
226    }
227
[656]228    // launch I/O operation to load page from IOC device when required:
229    // - it is the FAT mapper
230    // - it is a directory mapper
231    // - it is a file mapper, and it exist data on IOC device for this page
[672]232    if( (inode == NULL) || (inode_type == FILE_TYPE_DIR) || (inode_size > (page_id << 10) ) )
[656]233    {
234        error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
[635]235
[656]236        if( error )
237        {
238            printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
239            __FUNCTION__ , this->process->pid, this->trdid );
240            mapper_remote_release_page( mapper_xp , page_ptr );
241            return -1;
242         }
[635]243    }
244
245    // return extended pointer on allocated page
246    *page_xp_ptr = page_xp;
247
248#if DEBUG_MAPPER_HANDLE_MISS
[656]249ppn_t ppn = ppm_page2ppn( page_xp );
[635]250if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
251{
[656]252    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / page %x / ppn %x\n",
253    __FUNCTION__, this->process->pid, this->trdid, page_id, name, page_ptr, ppn );
[635]254}
255if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
256{
[656]257    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / page %x / ppn %x\n",
258    __FUNCTION__, this->process->pid, this->trdid, page_id, page_ptr, ppn );
[635]259}
260#endif
261
[656]262#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
263if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 
264{
265    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
266    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
267}
268#endif
269
[635]270    return 0;
271
[657]272}  // end mapper_handle_miss()
[635]273
[657]274/////////////////////////////////////////////
275xptr_t  mapper_get_page( xptr_t    mapper_xp,
276                         uint32_t  page_id )
[1]277{
[183]278    error_t       error;
[1]279
[606]280    thread_t * this = CURRENT_THREAD;
281
282    // get mapper cluster and local pointer
[656]283    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
284    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
[606]285
[672]286assert( __FUNCTION__, (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ),
[657]287"should not be used for the FAT mapper");
288
[438]289#if DEBUG_MAPPER_GET_PAGE
[625]290uint32_t      cycle = (uint32_t)hal_get_cycles();
[606]291char          name[CONFIG_VFS_MAX_NAME_LENGTH];
[657]292if( DEBUG_MAPPER_GET_PAGE < cycle ) 
[625]293{
[657]294    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
[625]295    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
296    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
297    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
298}
[435]299#endif
[204]300
[656]301#if( DEBUG_MAPPER_GET_PAGE & 2 )
302if( DEBUG_MAPPER_GET_PAGE < cycle ) 
303ppm_remote_display( local_cxy );
304#endif
305
[581]306    // check thread can yield
307    thread_assert_can_yield( this , __FUNCTION__ );
308
[606]309    // build extended pointer on mapper lock and mapper rt
[656]310    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
311    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
[606]312
[1]313    // take mapper lock in READ_MODE
[606]314    remote_rwlock_rd_acquire( lock_xp );
[1]315
316    // search page in radix tree
[656]317    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );
[1]318
[606]319    // test mapper miss
[635]320    if( page_xp == XPTR_NULL )                  // miss => handle it
[1]321    {
322        // release the lock in READ_MODE and take it in WRITE_MODE
[606]323        remote_rwlock_rd_release( lock_xp );
324        remote_rwlock_wr_acquire( lock_xp );
[1]325
[606]326        // second test on missing page because the page status can be modified
[1]327        // by another thread, when passing from READ_MODE to WRITE_MODE.
328        // from this point there is no concurrent accesses to mapper.
[606]329        page_xp = grdxt_remote_lookup( rt_xp , page_id );
[1]330
[606]331        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
[1]332        {
[657]333            error = mapper_handle_miss( mapper_xp,
334                                        page_id,
335                                        &page_xp );
[635]336            if( error )
[610]337            {
[606]338                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
339                __FUNCTION__ , this->process->pid, this->trdid );
340                remote_rwlock_wr_release( lock_xp );
341                return XPTR_NULL;
[1]342            }
343        }
[635]344
345#if (DEBUG_MAPPER_GET_PAGE & 1)
[657]346if( DEBUG_MAPPER_GET_PAGE < cycle )
347printk("\n[%s] thread[%x,%x] introduced missing page %d in <%s> mapper / ppn %x\n",
348__FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
[635]349#endif
[606]350       
351        // release mapper lock from WRITE_MODE
352        remote_rwlock_wr_release( lock_xp );
[1]353    }
[606]354    else                                              // hit
[1]355    {
[606]356        // release mapper lock from READ_MODE
357        remote_rwlock_rd_release( lock_xp );
[1]358    }
359
[438]360#if DEBUG_MAPPER_GET_PAGE
[657]361if( DEBUG_MAPPER_GET_PAGE < cycle )
362printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
363__FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
[435]364#endif
[204]365
[656]366#if( DEBUG_MAPPER_GET_PAGE & 2)
367if( DEBUG_MAPPER_GET_PAGE < cycle ) 
368ppm_remote_display( local_cxy );
369#endif
370
[606]371    return page_xp;
[204]372
[657]373}  // end mapper_get_page()
[204]374
[657]375/////////////////////////////////////////////////
376xptr_t  mapper_get_fat_page( xptr_t    mapper_xp,
377                             uint32_t  page_id )
378{
379    error_t       error;
380
381    thread_t * this = CURRENT_THREAD;
382
383    // get mapper cluster and local pointer
384    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
385    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
386
[672]387assert( __FUNCTION__, (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ),
[657]388"should be used for the FAT mapper");
389
390#if DEBUG_MAPPER_GET_FAT_PAGE
391uint32_t      cycle = (uint32_t)hal_get_cycles();
392if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 
393printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
394__FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
395#endif
396
397#if( DEBUG_MAPPER_GET_FAT_PAGE & 2 )
398if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 
399ppm_remote_display( local_cxy );
400#endif
401
402    // check thread can yield
403    thread_assert_can_yield( this , __FUNCTION__ );
404
405    // build extended pointer on mapper lock and mapper rt
406    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
407    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
408
409    // take mapper lock in READ_MODE
410    remote_rwlock_rd_acquire( lock_xp );
411
412    // search page in radix tree
413    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );
414
415    // test mapper miss
416    if( page_xp == XPTR_NULL )                  // miss => handle it
417    {
418        // release the lock in READ_MODE and take it in WRITE_MODE
419        remote_rwlock_rd_release( lock_xp );
420        remote_rwlock_wr_acquire( lock_xp );
421
422        // second test on missing page because the page status can be modified
423        // by another thread, when passing from READ_MODE to WRITE_MODE.
424        // from this point there is no concurrent accesses to mapper.
425        page_xp = grdxt_remote_lookup( rt_xp , page_id );
426
427        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
428        {
429            error = mapper_handle_miss( mapper_xp,
430                                        page_id,
431                                        &page_xp );
432            if( error )
433            {
434                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
435                __FUNCTION__ , this->process->pid, this->trdid );
436                remote_rwlock_wr_release( lock_xp );
437                return XPTR_NULL;
438            }
439        }
440
441#if (DEBUG_MAPPER_GET_FAT_PAGE & 1)
442if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
443printk("\n[%s] thread[%x,%x] introduced missing page %d in FAT mapper / ppn %x\n",
444__FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
445#endif
446       
447        // release mapper lock from WRITE_MODE
448        remote_rwlock_wr_release( lock_xp );
449    }
450    else                                              // hit
451    {
452        // release mapper lock from READ_MODE
453        remote_rwlock_rd_release( lock_xp );
454    }
455
456#if DEBUG_MAPPER_GET_FAT_PAGE
457if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
458printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x\n",
459__FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
460#endif
461
462#if( DEBUG_MAPPER_GET_FAT_PAGE & 2)
463if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 
464ppm_remote_display( local_cxy );
465#endif
466
467    return page_xp;
468
469}  // end mapper_get_fat_page()
470
[635]471////////////////////////////////////////////////////
472void mapper_remote_release_page( xptr_t   mapper_xp,
473                                 page_t * page )
[1]474{
[635]475    // get mapper cluster an local pointer
476    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
477    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
[1]478
[606]479    // build extended pointer on mapper lock
[635]480    xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );
[606]481
[1]482    // take mapper lock in WRITE_MODE
[635]483    remote_rwlock_wr_acquire( lock_xp );
[1]484
485    // remove physical page from radix tree
[635]486    grdxt_remote_remove( XPTR( mapper_cxy , &mapper_ptr->rt ) , page->index );
[1]487
488    // release mapper lock from WRITE_MODE
[635]489    remote_rwlock_wr_release( lock_xp );
[1]490
491    // release page to PPM
[635]492    ppm_remote_free_pages( mapper_cxy , page );
493                           
[204]494}  // end mapper_release_page()
495
///////////////////////////////////////////////////////////////////////////////
// Move <size> bytes between the mapper identified by <mapper_xp> and the
// user-space buffer <buffer> in the client cluster :
// - to_buffer == true  : mapper -> user buffer
// - to_buffer == false : user buffer -> mapper (touched pages marked dirty)
// <file_offset> is the index of the first moved byte in the file.
// The transfer is split on mapper page boundaries ; missing pages are
// introduced on demand by mapper_get_page().
// Returns 0 on success / returns -1 if a mapper page cannot be obtained.
///////////////////////////////////////////////////////////////////////////////
error_t mapper_move_user( xptr_t     mapper_xp,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t      cycle      = (uint32_t)hal_get_cycles();
thread_t    * this       = CURRENT_THREAD;
cxy_t         mapper_cxy = GET_CXY( mapper_xp );
mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
vfs_inode_t * inode_ptr  = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
xptr_t        inode_xp   = XPTR( mapper_cxy , inode_ptr );
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_get_name( inode_xp , name );
if( DEBUG_MAPPER_MOVE_USER < cycle )
{
    if( to_buffer )
    printk("\n[%s] thread[%x,%x] : mapper(%s) -> buffer(%x) / bytes %d / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, name, buffer, size, cycle );
    else
    printk("\n[%s] thread[%x,%x] : buffer(%x) -> mapper(%s) / bytes %d / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, buffer, name, size, cycle );
}
#endif

    // compute indexes of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : mapper(%x,%x) / first_page %d / last_page %d\n",
__FUNCTION__, this->process->pid, this->trdid, mapper_cxy, mapper_ptr, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset : non-zero only for the first page
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page :
        // single page / first page / last page / full middle page
        if      ( first   == last  ) page_bytes = size;
        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : page_id %d / page_offset %d / bytes %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_bytes );
#endif

        // get extended pointer on page descriptor in mapper
        // (handles the miss, including load from device, if required)
        page_xp = mapper_get_page( mapper_xp , page_id ); 

        if ( page_xp == XPTR_NULL ) return -1;

        // compute extended pointer on kernel mapper
        xptr_t     map_xp  = ppm_page2base( page_xp ) + page_offset;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : get buffer(%x,%x) in mapper\n",
__FUNCTION__, this->process->pid, this->trdid, GET_CXY(map_xp), GET_PTR(map_xp) );
#endif
        // compute pointer in user buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            // mapper -> user buffer
            hal_copy_to_uspace( buf_ptr , map_xp , page_bytes ); 

#if DEBUG_MAPPER_MOVE_USER & 1
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] moved %d bytes / mapper %s (%x,%x) -> user buffer(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, page_bytes,
name, GET_CXY(map_xp), GET_PTR(map_xp), local_cxy, buf_ptr );
#endif

        }
        else
        {
            // user buffer -> mapper : mark page dirty before modifying it
            ppm_page_do_dirty( page_xp ); 
            hal_copy_from_uspace( map_xp , buf_ptr , page_bytes ); 

#if DEBUG_MAPPER_MOVE_USER & 1
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] moved %d bytes / user buffer(%x,%x) -> mapper %s (%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, page_bytes,
local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
mapper_display_page(  mapper_xp , page_id , 128 );
#endif

        }

        done += page_bytes;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle      = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
{
    if( to_buffer )
    printk("\n[%s] thread[%x,%x] completed mapper(%s) -> buffer(%x) / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, name, buffer, cycle );
    else
    printk("\n[%s] thread[%x,%x] completed buffer(%x) -> mapper(%s) / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, buffer, name, cycle );
}
#endif

    return 0;

}  // end mapper_move_user()
[204]627
///////////////////////////////////////////////////////////////////////////////
// Move <size> bytes between the mapper identified by <mapper_xp> and a
// kernel buffer identified by the extended pointer <buffer_xp>, which can
// be located in any cluster :
// - to_buffer == true  : mapper -> kernel buffer
// - to_buffer == false : kernel buffer -> mapper (touched pages marked dirty)
// <file_offset> is the index of the first moved byte in the file.
// The transfer is split on mapper page boundaries ; missing pages are
// introduced on demand by mapper_get_page().
// Returns 0 on success / returns -1 if a mapper page cannot be obtained.
///////////////////////////////////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
uint32_t      cycle  = (uint32_t)hal_get_cycles();
thread_t    * this   = CURRENT_THREAD;
mapper_t    * mapper = GET_PTR( mapper_xp );
vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size -1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    // compute source and destination clusters (fixed for the whole move)
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset : non-zero only for the first page
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page :
        // single page / first page / last page / full middle page
        if      ( first == last  )   page_bytes = size;
        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;

        // get extended pointer on page descriptor
        // (handles the miss, including load from device, if required)
        page_xp = mapper_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            // mapper page is about to be modified : mark it dirty
            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
{
    if( to_buffer )
    printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",
    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
    else
    printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",
    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
}
#endif

        // move fragment (cluster-to-cluster copy)
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_bytes );

        done += page_bytes;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / mapper <%s> / buffer (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, name, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
[313]751
[606]752///////////////////////////////////////////////////
753error_t mapper_remote_get_32( xptr_t     mapper_xp,
[628]754                              uint32_t   page_id,
[606]755                              uint32_t   word_id,
[628]756                              uint32_t * value )
[606]757{
758    xptr_t     page_xp;      // extended pointer on searched page descriptor
759    xptr_t     base_xp;      // extended pointer on searched page base
760   
761    // get page containing the searched word
[657]762    page_xp  = mapper_get_page( mapper_xp , page_id );
[606]763
764    if( page_xp == XPTR_NULL )  return -1;
765   
766    // get page base
767    base_xp = ppm_page2base( page_xp );
768
769    // get the value from mapper
[628]770    *value = hal_remote_l32( base_xp + (word_id<<2) ); 
[606]771
772    return 0;
773
774}  // end mapper_remote_get_32()
775
776///////////////////////////////////////////////////
777error_t mapper_remote_set_32( xptr_t     mapper_xp,
[628]778                              uint32_t   page_id,
[606]779                              uint32_t   word_id,
780                              uint32_t   value )
781{
782    xptr_t     page_xp;      // extended pointer on searched page descriptor
783    xptr_t     base_xp;      // extended pointer on searched page base
784
785    // get page containing the searched word
[657]786    page_xp  = mapper_get_page( mapper_xp , page_id );
[606]787
788    if( page_xp == XPTR_NULL ) return -1;
789
790    // get page base
791    base_xp = ppm_page2base( page_xp );
792
793    // set value to mapper
[628]794    hal_remote_s32( (base_xp + (word_id << 2)) , value );
[606]795
[628]796    // set the dirty flag in page descriptor
[606]797    ppm_page_do_dirty( page_xp );
798
799    return 0;
800
801}  // end mapper_remote_set_32()
802
[657]803////////////////////////////////////////
804error_t mapper_sync( xptr_t  mapper_xp )
[623]805{
[657]806    uint32_t   found_key;           // unused, required by grdxt_remote_get_first()
[623]807    error_t    error;
808
[657]809    // get mapper cluster and local pointer
810    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
811    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
812
[623]813#if DEBUG_MAPPER_SYNC
814thread_t * this  = CURRENT_THREAD;
815uint32_t   cycle = (uint32_t)hal_get_cycles();
816char       name[CONFIG_VFS_MAX_NAME_LENGTH];
[657]817vfs_inode_get_name( XPTR( mapper_cxy , &mapper_ptr->inode ) , name );
[623]818#endif
819
[657]820    // build extended pointer on radix tree
821    xptr_t   rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );
[623]822
823    // initialise loop variable
[657]824    uint32_t start_key = 0;
[623]825
826    // scan radix-tree until last page found
827    while( 1 )
828    {
829        // get page descriptor from radix tree
[657]830        xptr_t page_xp = grdxt_remote_get_first( rt_xp , start_key , &found_key );
[623]831         
[657]832        page_t * page_ptr = GET_PTR( page_xp );
[623]833
[657]834        // exit loop when last page found
835        if( page_ptr == NULL ) break;
[623]836
[657]837        // get page flags & index fields
838        uint32_t flags = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->flags ) );
839        uint32_t index = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->index ) );
[623]840
841        // synchronize page if dirty
[657]842        if( flags & PG_DIRTY )
[623]843        {
844
845#if DEBUG_MAPPER_SYNC
846if( cycle > DEBUG_MAPPER_SYNC )
[626]847printk("\n[%s] thread[%x,%x] synchonise page %d of <%s> to IOC device\n",
[657]848__FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
[623]849#endif
850            // copy page to file system
851            error = vfs_fs_move_page( page_xp , IOC_WRITE );
852
853            if( error )
854            {
855                printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n", 
[657]856                __FUNCTION__, page_ptr->index );
[623]857                return -1;
858            }
859
860            // remove page from PPM dirty list
861            ppm_page_undo_dirty( page_xp ); 
862        } 
863        else
864        {
865
866#if DEBUG_MAPPER_SYNC
867if( cycle > DEBUG_MAPPER_SYNC )
868printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
[657]869__FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
[623]870#endif
871        }
872
873        // update loop variable
[657]874        start_key = index + 1;
[623]875    }  // end while
876
877    return 0;
878
879}  // end mapper_sync()
880
[656]881///////////////////////////////////////////////
882void mapper_display_page( xptr_t     mapper_xp,
[657]883                          uint32_t   page_id,
[656]884                          uint32_t   nbytes )
[611]885{
[614]886    char          buffer[4096];   // local buffer
887    uint32_t      line;           // line index
888    uint32_t      word;           // word index
889 
890    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
[606]891
[672]892assert( __FUNCTION__, (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
893assert( __FUNCTION__, (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
[611]894
[656]895    // get mapper cluster and local pointer
896    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
897    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
[611]898
[657]899    // get extended pointer on page descriptor
900    xptr_t page_xp = mapper_get_page( mapper_xp , page_id );
901
902    // get page cluster and local pointer
[656]903    cxy_t    page_cxy = GET_CXY( page_xp );
904    page_t * page_ptr = GET_PTR( page_xp );
[614]905
[656]906    // get page_id and mapper from page descriptor
[657]907    uint32_t   index   = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
[656]908    mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );
909
[672]910assert( __FUNCTION__, (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
911assert( __FUNCTION__, (mapper_ptr == mapper   ) , "unconsistent mapper field in page descriptor");
912assert( __FUNCTION__, (page_id    == index    ) , "unconsistent index  field in page descriptor");
[656]913
[614]914    // get inode
[656]915    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
[614]916
917    // get inode name
[656]918    if( inode_ptr == NULL ) strcpy( name , "FAT" );
[614]919    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
920   
[611]921    // get extended pointer on page base
[656]922    xptr_t base_xp = ppm_page2base( page_xp );
[611]923   
924    // copy remote page to local buffer
925    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
926
[656]927    // display header
928    uint32_t * tabi = (uint32_t *)buffer;
929    printk("\n***** mapper <%s> / page_id %d / cxy %x / mapper %x / buffer %x\n",
930    name, page_id, mapper_cxy, mapper_ptr, GET_PTR( base_xp ) );
931
[611]932    // display 8 words per line
933    for( line = 0 ; line < (nbytes >> 5) ; line++ )
934    {
[625]935        printk("%X : ", line << 5 );
[614]936        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
[611]937        printk("\n");
938    }
939
[656]940}  // end mapper_display_page()
[611]941
942
Note: See TracBrowser for help on using the repository browser.