source: trunk/kernel/mm/mapper.c @ 624

Last change on this file since 624 was 624, checked in by alain, 6 years ago

Fix several bugs to use the instruction MMU in kernel mode
in replacement of the instruction address extension register,
and remove the "kentry" segment.

This version is running on the "tsar_generic_iob" platform.

One interesting bug: the cp0_ebase defining the kernel entry point
(for interrupts, exceptions and syscalls) must be initialized
early in kernel_init(), because the VFS initialisation done by
kernel_init() uses RPCs, and RPCs use Inter-Processor-Interrupts.

File size: 24.9 KB
RevLine 
[1]1/*
[606]2 * mapper.c - Kernel cache for FS files or directories implementation.
[1]3 *
4 * Authors   Mohamed Lamine Karaoui (2015)
[623]5 *           Alain Greiner (2016,2017,2018,2019)
[1]6 *
7 * Copyright (c)  UPMC Sorbonne Universites
8 *
9 * This file is part of ALMOS-MKH.
10 *
11 * ALMOS-MKH is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; version 2.0 of the License.
14 *
15 * ALMOS-MKH is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
22 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
23 */
24
[14]25#include <kernel_config.h>
[457]26#include <hal_kernel_types.h>
[1]27#include <hal_special.h>
[23]28#include <hal_uspace.h>
[1]29#include <grdxt.h>
[614]30#include <string.h>
[1]31#include <rwlock.h>
32#include <printk.h>
[279]33#include <memcpy.h>
[1]34#include <thread.h>
35#include <core.h>
36#include <process.h>
37#include <kmem.h>
38#include <kcm.h>
[567]39#include <ppm.h>
[1]40#include <page.h>
41#include <cluster.h>
42#include <vfs.h>
43#include <mapper.h>
[614]44#include <dev_ioc.h>
[1]45
[567]46
[246]47//////////////////////////////////////////////
48mapper_t * mapper_create( vfs_fs_type_t type )
[1]49{
50    mapper_t * mapper;
51    kmem_req_t req;
52    error_t    error;
53
[606]54    // allocate memory for mapper
[183]55    req.type  = KMEM_MAPPER;
56    req.size  = sizeof(mapper_t);
[1]57    req.flags = AF_KERNEL | AF_ZERO;
[183]58    mapper    = (mapper_t *)kmem_alloc( &req );
[1]59
60    if( mapper == NULL )
61    {
62        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
63        return NULL;
64    }
65
66    // initialize refcount & inode
[183]67    mapper->refcount = 0;
[1]68    mapper->inode    = NULL;
69
70    // initialize radix tree
[606]71    error = grdxt_init( &mapper->rt,
72                        CONFIG_MAPPER_GRDXT_W1,
73                        CONFIG_MAPPER_GRDXT_W2,
74                        CONFIG_MAPPER_GRDXT_W3 );
[1]75
76    if( error )
77    {
78        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
[183]79        req.type  = KMEM_MAPPER;
[1]80        req.ptr   = mapper;
81        kmem_free( &req );
82        return NULL;
83    }
84
[246]85    // initialize mapper type
86    mapper->type = type;
87
[1]88    // initialize mapper lock
[606]89    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
[1]90
91    // initialize waiting threads xlist (empty)
[183]92    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );
[1]93
94    // initialize vsegs xlist (empty)
[183]95    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );
[1]96
97    return mapper;
98
[204]99}  // end mapper_create()
100
[606]101////////////////////////////////////////
102void mapper_destroy( mapper_t * mapper )
[1]103{
104    page_t   * page;
105    uint32_t   found_index = 0;
106    uint32_t   start_index = 0;
107    kmem_req_t req;
108
[606]109    // scan radix tree
[1]110    do
111    {
112        // get page from radix tree
[606]113        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
[1]114
[606]115        // release registered pages to PPM
[18]116        if( page != NULL )
[1]117        {
118            // remove page from mapper and release to PPM
[606]119            mapper_release_page( mapper , page );
[1]120
121            // update start_key value for next page
122            start_index = found_index;
123        }
124    }
125    while( page != NULL );
126
[606]127    // release the memory allocated to radix tree itself
128    grdxt_destroy( &mapper->rt );
[1]129
130    // release memory for mapper descriptor
131    req.type = KMEM_MAPPER;
132    req.ptr  = mapper;
133    kmem_free( &req );
134
[204]135}  // end mapper_destroy()
136
////////////////////////////////////////////////////
// Return an extended pointer on the page descriptor for page <page_id>
// of a possibly remote mapper, handling a mapper miss if required.
// The mapper lock is taken in READ_MODE for the lookup, and upgraded
// to WRITE_MODE (release + re-acquire) only when a miss is detected.
// @ mapper_xp : extended pointer on the mapper.
// @ page_id   : page index in file.
// @ return extended pointer on page descriptor / XPTR_NULL if error.
////////////////////////////////////////////////////
xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
                                uint32_t  page_id )
{
    error_t       error;
    mapper_t    * mapper_ptr;
    cxy_t         mapper_cxy;
    xptr_t        lock_xp;        // extended pointer on mapper lock
    xptr_t        page_xp;        // extended pointer on searched page descriptor
    xptr_t        rt_xp;          // extended pointer on radix tree in mapper

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_ptr = GET_PTR( mapper_xp );
    mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
#endif

    // check thread can yield : miss handling may block on I/O
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => try to handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // second test on missing page because the page status can be modified
        // by another thread, when passing from READ_MODE to WRITE_MODE.
        // from this point there is no concurrent accesses to mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {

            if( mapper_cxy == local_cxy )   // mapper is local
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
#endif
                 error = mapper_handle_miss( mapper_ptr,
                                             page_id, 
                                             &page_xp );
            } 
            else                            // mapper is remote => delegate by RPC
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
#endif
                 rpc_mapper_handle_miss_client( mapper_cxy,
                                                mapper_ptr,
                                                page_id,
                                                &page_xp,
                                                &error );
            }

            if ( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }
       
        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, 
page_id, name, ppm_page2ppn( page_xp ), cycle );
#endif

    return page_xp;

}  // end mapper_remote_get_page()
[204]246
[606]247//////////////////////////////////////////////
248error_t mapper_handle_miss( mapper_t * mapper,
249                            uint32_t   page_id,
250                            xptr_t   * page_xp )
[1]251{
[606]252    kmem_req_t   req;
253    page_t     * page;
254    error_t      error;
[1]255
[606]256    thread_t * this = CURRENT_THREAD;
[1]257
[606]258#if DEBUG_MAPPER_HANDLE_MISS
259uint32_t cycle = (uint32_t)hal_get_cycles();
260char          name[CONFIG_VFS_MAX_NAME_LENGTH];
261vfs_inode_t * inode = mapper->inode;
262vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
[623]263if( DEBUG_MAPPER_HANDLE_MISS < cycle )
[610]264printk("\n[%s] enter for page %d in <%s> / cycle %d",
[606]265__FUNCTION__, page_id, name, cycle );
266if( DEBUG_MAPPER_HANDLE_MISS & 1 )
[610]267grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
[606]268#endif
269
[610]270    // allocate one page from the local cluster
[606]271    req.type  = KMEM_PAGE;
272    req.size  = 0;
273    req.flags = AF_NONE;
274    page = kmem_alloc( &req );
275
276    if( page == NULL )
277    {
278        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
279        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
280        return -1;
281    }
282
283    // initialize the page descriptor
284    page_init( page );
285    page_set_flag( page , PG_INIT );
286    page_refcount_up( page );
287    page->mapper = mapper;
288    page->index  = page_id;
289
290    // insert page in mapper radix tree
291    error = grdxt_insert( &mapper->rt , page_id , page );
292
[1]293    if( error )
294    {
[606]295        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
296        __FUNCTION__ , this->process->pid, this->trdid );
297        mapper_release_page( mapper , page );
298        req.ptr  = page;
299        req.type = KMEM_PAGE;
300        kmem_free(&req);
301        return -1;
[1]302    }
[18]303
[606]304    // launch I/O operation to load page from device to mapper
[614]305    error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ );
[606]306
307    if( error )
308    {
309        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
310        __FUNCTION__ , this->process->pid, this->trdid );
311        mapper_release_page( mapper , page );
312        req.ptr  = page;
313        req.type = KMEM_PAGE;
314        kmem_free( &req );
315        return -1;
316    }
317
318    // set extended pointer on allocated page
319    *page_xp = XPTR( local_cxy , page );
320
321#if DEBUG_MAPPER_HANDLE_MISS
322cycle = (uint32_t)hal_get_cycles();
[623]323if( DEBUG_MAPPER_HANDLE_MISS < cycle )
[610]324printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
[606]325__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
326if( DEBUG_MAPPER_HANDLE_MISS & 1 )
[610]327grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
[606]328#endif
329
330    return 0;
331
332}  // end mapper_handle_miss()
333
334////////////////////////////////////////////
335void mapper_release_page( mapper_t * mapper,
336                          page_t   * page )
337{
338    // build extended pointer on mapper lock
339    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );
340
[1]341    // take mapper lock in WRITE_MODE
[606]342    remote_rwlock_wr_acquire( mapper_lock_xp );
[1]343
344    // remove physical page from radix tree
[606]345    grdxt_remove( &mapper->rt , page->index );
[1]346
347    // release mapper lock from WRITE_MODE
[606]348    remote_rwlock_wr_release( mapper_lock_xp );
[1]349
350    // release page to PPM
[183]351    kmem_req_t   req;
352    req.type  = KMEM_PAGE;
[1]353    req.ptr   = page;
354    kmem_free( &req );
355
[204]356}  // end mapper_release_page()
357
[610]358///////////////////////////////////////////////
359error_t mapper_move_user( xptr_t     mapper_xp,
[313]360                          bool_t     to_buffer,
361                          uint32_t   file_offset,
362                          void     * buffer,
363                          uint32_t   size )
[1]364{
[23]365    uint32_t   page_offset;    // first byte to move to/from a mapper page
366    uint32_t   page_count;     // number of bytes to move to/from a mapper page
[606]367    uint32_t   page_id;        // current mapper page index
[23]368    uint32_t   done;           // number of moved bytes
[606]369    xptr_t     page_xp;        // extended pointer on current mapper page descriptor
[330]370
[438]371#if DEBUG_MAPPER_MOVE_USER
[606]372uint32_t   cycle = (uint32_t)hal_get_cycles();
373thread_t * this  = CURRENT_THREAD;
[438]374if( DEBUG_MAPPER_MOVE_USER < cycle )
[606]375printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
376__FUNCTION__, this->process->pid, this->trdid,
377to_buffer, buffer, size, file_offset, cycle );
[435]378#endif
[1]379
[23]380    // compute offsets of first and last bytes in file
381    uint32_t min_byte = file_offset;
[606]382    uint32_t max_byte = file_offset + size - 1;
[1]383
[23]384    // compute indexes of pages for first and last byte in mapper
385    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
386    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;
[1]387
[606]388#if (DEBUG_MAPPER_MOVE_USER & 1)
389if( DEBUG_MAPPER_MOVE_USER < cycle )
[610]390printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
391__FUNCTION__, this->process->pid, this->trdid, first, last );
[606]392#endif
393
[23]394    done = 0;
[1]395
[23]396    // loop on pages in mapper
[606]397    for( page_id = first ; page_id <= last ; page_id++ )
[1]398    {
[183]399        // compute page_offset
[606]400        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
401        else                   page_offset = 0;
[1]402
[313]403        // compute number of bytes in page
[606]404        if      ( first   == last  ) page_count = size;
405        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
406        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
407        else                         page_count = CONFIG_PPM_PAGE_SIZE;
[1]408
[438]409#if (DEBUG_MAPPER_MOVE_USER & 1)
410if( DEBUG_MAPPER_MOVE_USER < cycle )
[610]411printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
412__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
[435]413#endif
[265]414
[606]415        // get extended pointer on page descriptor
416        page_xp = mapper_remote_get_page( mapper_xp , page_id ); 
[1]417
[606]418        if ( page_xp == XPTR_NULL ) return -1;
[1]419
[610]420#if (DEBUG_MAPPER_MOVE_USER & 1)
421if( DEBUG_MAPPER_MOVE_USER < cycle )
422printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
423__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
424#endif
425
[23]426        // compute pointer in mapper
[606]427        xptr_t    base_xp = ppm_page2base( page_xp );
428        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;
[1]429
[23]430        // compute pointer in buffer
[606]431        uint8_t * buf_ptr = (uint8_t *)buffer + done;
[1]432
433        // move fragment
[330]434        if( to_buffer )
[1]435        {
[606]436            hal_copy_to_uspace( buf_ptr , map_ptr , page_count ); 
[1]437        }
[330]438        else
[1]439        {
[606]440            ppm_page_do_dirty( page_xp ); 
441            hal_copy_from_uspace( map_ptr , buf_ptr , page_count ); 
[1]442        }
443
[23]444        done += page_count;
[1]445    }
446
[438]447#if DEBUG_MAPPER_MOVE_USER
[435]448cycle = (uint32_t)hal_get_cycles();
[438]449if( DEBUG_MAPPER_MOVE_USER < cycle )
[606]450printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
451__FUNCTION__, this->process->pid, this->trdid, cycle );
[435]452#endif
[204]453
[1]454    return 0;
455
[313]456}  // end mapper_move_user()
[204]457
////////////////////////////////////////////////
// Move <size> bytes between a (possibly remote) mapper and a kernel
// buffer identified by an extended pointer, in either direction,
// page per page, using hal_remote_memcpy().
// NOTE(review): the mapper-side cluster is taken from mapper_xp; this
// assumes mapper pages are allocated in the mapper cluster (miss is
// handled locally or delegated by RPC to that cluster) — confirm.
// @ mapper_xp   : extended pointer on the mapper.
// @ to_buffer   : mapper -> buffer if true / buffer -> mapper if false.
// @ file_offset : first byte offset in file.
// @ buffer_xp   : extended pointer on kernel buffer.
// @ size        : number of bytes to move.
// @ return 0 if success / return -1 if error.
////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size -1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters once,
    // depending on the transfer direction
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset : non-zero only for the first page
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page :
        // full page except possibly for the first and last pages
        if      ( first == last  )   page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor (handles mapper miss)
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            // mapper page is modified : register it dirty
            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
#endif

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
[313]582
[606]583///////////////////////////////////////////////////
584error_t mapper_remote_get_32( xptr_t     mapper_xp,
585                              uint32_t   word_id,
586                              uint32_t * p_value )
587{
588    uint32_t   page_id;      // page index in file
589    uint32_t   local_id;     // word index in page
590    xptr_t     page_xp;      // extended pointer on searched page descriptor
591    xptr_t     base_xp;      // extended pointer on searched page base
592
593   
594    // get page index and local word index
595    page_id  = word_id >> 10;
596    local_id = word_id & 0x3FF;
597
598    // get page containing the searched word
599    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
600
601    if( page_xp == XPTR_NULL )  return -1;
602   
603    // get page base
604    base_xp = ppm_page2base( page_xp );
605
606    // get the value from mapper
607    *p_value = hal_remote_l32( base_xp + (local_id<<2) ); 
608
609    return 0;
610
611}  // end mapper_remote_get_32()
612
613///////////////////////////////////////////////////
614error_t mapper_remote_set_32( xptr_t     mapper_xp,
615                              uint32_t   word_id,
616                              uint32_t   value )
617{
618   
619    uint32_t   page_id;      // page index in file
620    uint32_t   local_id;     // word index in page
621    xptr_t     page_xp;      // extended pointer on searched page descriptor
622    xptr_t     base_xp;      // extended pointer on searched page base
623
624    // get page index and local vord index
625    page_id  = word_id >> 10;
626    local_id = word_id & 0x3FF;
627
628    // get page containing the searched word
629    page_xp  = mapper_remote_get_page( mapper_xp , page_id );
630
631    if( page_xp == XPTR_NULL ) return -1;
632
633    // get page base
634    base_xp = ppm_page2base( page_xp );
635
636    // set value to mapper
637    hal_remote_s32( (base_xp + (local_id << 2)) , value );
638
639    // set the dirty flag
640    ppm_page_do_dirty( page_xp );
641
642    return 0;
643
644}  // end mapper_remote_set_32()
645
/////////////////////////////////////////
// Scan all pages registered in a local mapper, and write every dirty
// page back to the file system device, removing it from the PPM dirty
// list. Clean pages are skipped.
// NOTE(review): the radix tree is scanned without taking the mapper
// lock — presumably callers guarantee no concurrent update; confirm.
// @ mapper : local pointer on the mapper.
// @ return 0 if success / return -1 if a page cannot be written.
/////////////////////////////////////////
error_t mapper_sync( mapper_t *  mapper )
{
    page_t   * page;                // local pointer on current page descriptor
    xptr_t     page_xp;             // extended pointer on current page descriptor
    grdxt_t  * rt;                  // pointer on radix_tree descriptor
    uint32_t   start_key;           // start page index in mapper
    uint32_t   found_key;           // current page index in mapper
    error_t    error;

#if DEBUG_MAPPER_SYNC
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
char       name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
#endif

    // get pointer on radix tree
    rt        = &mapper->rt;

    // initialise loop variable
    start_key = 0;

    // scan radix-tree until last page found
    while( 1 )
    {
        // get page descriptor from radix tree
        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
         
        if( page == NULL ) break;

assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );

        // build extended pointer on page descriptor
        page_xp = XPTR( local_cxy , page );

        // synchronize page if dirty
        if( (page->flags & PG_DIRTY) != 0 )
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] synchonise page %d of <%s> to device\n",
__FUNCTION__, this->process->pid, this->trdid, page->index, name );
#endif
            // copy page to file system
            error = vfs_fs_move_page( page_xp , IOC_WRITE );

            if( error )
            {
                printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n", 
                __FUNCTION__, page->index );
                return -1;
            }

            // remove page from PPM dirty list
            ppm_page_undo_dirty( page_xp ); 
        } 
        else      // clean page : nothing to do
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
__FUNCTION__, this->process->pid, this->trdid, page->index, name );
#endif
        }

        // update loop variable : resume scan after current page
        start_key = page->index + 1;
    }  // end while

    return 0;

}  // end mapper_sync()
722
//////////////////////////////////////////////////
// Display on the kernel console the first <nbytes> bytes of page
// <page_id> of a (possibly remote) mapper, as 32-byte lines showing
// both hexadecimal words and ASCII characters.
// NOTE(review): the 4096-byte local buffer lives on the kernel stack —
// verify the kernel stack can afford it on all callers' paths.
// NOTE(review): only full 32-byte lines are printed (nbytes >> 5), so
// up to 31 trailing bytes are silently not displayed.
// @ mapper_xp : extended pointer on the mapper.
// @ page_id   : page index in file.
// @ nbytes    : number of bytes to display (at most 4096).
// @ return 0 if success / return -1 if error.
//////////////////////////////////////////////////
error_t mapper_display_page( xptr_t     mapper_xp,
                             uint32_t   page_id,
                             uint32_t   nbytes )
{
    xptr_t        page_xp;        // extended pointer on page descriptor
    xptr_t        base_xp;        // extended pointer on page base
    char          buffer[4096];   // local buffer
    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
    char        * tabc;           // pointer on char to scan buffer
    uint32_t      line;           // line index
    uint32_t      word;           // word index
    uint32_t      n;              // char index
    cxy_t         mapper_cxy;     // mapper cluster identifier
    mapper_t    * mapper_ptr;     // mapper local pointer
    vfs_inode_t * inode_ptr;      // inode local pointer
 
    char       name[CONFIG_VFS_MAX_NAME_LENGTH];

    if( nbytes > 4096)
    {
        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
        __FUNCTION__, nbytes );
        return -1;
    }
   
    // get extended pointer on page descriptor (handles mapper miss)
    page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL)
    {
        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
        __FUNCTION__, page_id );
        return -1;
    }

    // get cluster and local pointer
    mapper_cxy = GET_CXY( mapper_xp );
    mapper_ptr = GET_PTR( mapper_xp );

    // get inode
    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );

    // get inode name : a mapper with no inode is the FAT mapper
    if( inode_ptr == NULL ) strcpy( name , "fat" );
    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
   
    // get extended pointer on page base
    base_xp = ppm_page2base( page_xp );
   
    // copy remote page to local buffer
    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );

    // display 8 words per line : hex words then ASCII characters
    tabi = (uint32_t *)buffer;
    tabc = (char *)buffer;
    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    {
        printk("%X : ", line );
        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
        printk(" | ");
        for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
        printk("\n");
    }

    return 0;

}  // end mapper_display_page
792
793
Note: See TracBrowser for help on using the repository browser.