source: trunk/kernel/mm/mapper.c @ 631

Last change on this file since 631 was 628, checked in by alain, 6 years ago

Introduce the page_min / page_max mechanism in the fatfs_release_inode()
function, to avoid scanning all pages in the FAT mapper.

File size: 27.2 KB
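The fatfs_release_inode() function itself lives in the FATFS code and is not shown in this file. As a rough illustration of the bounded-scan idea only, the sketch below assumes hypothetical page_min / page_max indexes recorded by the FAT mapper, so the release loop walks a window of the radix tree instead of always starting from index 0; it reuses grdxt_get_first() and mapper_release_page() exactly as mapper_destroy() does below.

    // Illustrative sketch only: page_min / page_max are assumed values maintained
    // by the FATFS code, not fields defined in this file.
    static void fat_mapper_release_range( mapper_t * mapper,
                                          uint32_t   page_min,
                                          uint32_t   page_max )
    {
        page_t   * page;
        uint32_t   found_index;
        uint32_t   start_index = page_min;

        while( start_index <= page_max )
        {
            // get next registered page, starting from start_index
            page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );

            // stop at the end of the tree, or past the [page_min,page_max] window
            if( (page == NULL) || (found_index > page_max) ) break;

            // remove page from mapper and release it to PPM
            mapper_release_page( mapper , page );

            start_index = found_index + 1;
        }
    }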
/*
 * mapper.c - Kernel cache implementation for FS files and directories.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018,2019)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <string.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>
#include <dev_ioc.h>


//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->rt,
                        CONFIG_MAPPER_GRDXT_W1,
                        CONFIG_MAPPER_GRDXT_W2,
                        CONFIG_MAPPER_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

////////////////////////////////////////
void mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;

    // scan radix tree
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );

        // release registered pages to PPM
        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            mapper_release_page( mapper , page );

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix tree itself
    grdxt_destroy( &mapper->rt );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

}  // end mapper_destroy()

////////////////////////////////////////////////////
xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
                                uint32_t  page_id )
{
    error_t       error;
    mapper_t    * mapper_ptr;
    cxy_t         mapper_cxy;
    xptr_t        lock_xp;        // extended pointer on mapper lock
    xptr_t        page_xp;        // extended pointer on searched page descriptor
    xptr_t        rt_xp;          // extended pointer on radix tree in mapper

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_ptr = GET_PTR( mapper_xp );
    mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_GET_PAGE
vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
uint32_t      cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )  // FAT mapper
{
    printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
}
if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )  // file mapper
{
    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
}
#endif

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => try to handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // test again for a missing page, because the page status can be modified
        // by another thread while the lock is upgraded from READ_MODE to WRITE_MODE.
        // From this point on, there is no concurrent access to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {

            if( mapper_cxy == local_cxy )   // mapper is local
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
#endif
                 error = mapper_handle_miss( mapper_ptr,
                                             page_id,
                                             &page_xp );
            }
            else
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
#endif
                 rpc_mapper_handle_miss_client( mapper_cxy,
                                                mapper_ptr,
                                                page_id,
                                                &page_xp,
                                                &error );
            }

            if ( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
{
    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id,
    name, ppm_page2ppn(page_xp), cycle );
}
if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
{
    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id,
    ppm_page2ppn(page_xp), cycle );
}
#endif

    return page_xp;

}  // end mapper_remote_get_page()

//////////////////////////////////////////////
error_t mapper_handle_miss( mapper_t * mapper,
                            uint32_t   page_id,
                            xptr_t   * page_xp )
{
    kmem_req_t   req;
    page_t     * page;
    error_t      error;

    thread_t * this = CURRENT_THREAD;

#if DEBUG_MAPPER_HANDLE_MISS
uint32_t      cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = mapper->inode;
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
{
    vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d",
    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
}
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
{
    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d",
    __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
}
#endif

    // allocate one page from the local cluster
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_NONE;
    page = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
        return -1;
    }

    // initialize the page descriptor
    page_init( page );
    page_set_flag( page , PG_INIT );
    page_refcount_up( page );
    page->mapper = mapper;
    page->index  = page_id;

    // insert page in mapper radix tree
    error = grdxt_insert( &mapper->rt , page_id , page );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        mapper_release_page( mapper , page );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free(&req);
        return -1;
    }

    // launch I/O operation to load page from IOC device to mapper
    error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        mapper_release_page( mapper , page );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free( &req );
        return -1;
    }

    // set extended pointer on allocated page
    *page_xp = XPTR( local_cxy , page );

#if DEBUG_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
{
    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
    __FUNCTION__, this->process->pid, this->trdid,
    page_id, name, ppm_page2ppn( *page_xp ), cycle );
    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
}
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
{
    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
    __FUNCTION__, this->process->pid, this->trdid,
    page_id, ppm_page2ppn( *page_xp ), cycle );
    if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
}
#endif

    return 0;

}  // end mapper_handle_miss()

////////////////////////////////////////////
void mapper_release_page( mapper_t * mapper,
                          page_t   * page )
{
    // build extended pointer on mapper lock
    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );

    // take mapper lock in WRITE_MODE
    remote_rwlock_wr_acquire( mapper_lock_xp );

    // remove physical page from radix tree
    grdxt_remove( &mapper->rt , page->index );

    // release mapper lock from WRITE_MODE
    remote_rwlock_wr_release( mapper_lock_xp );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

}  // end mapper_release_page()

///////////////////////////////////////////////
error_t mapper_move_user( xptr_t     mapper_xp,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t      cycle      = (uint32_t)hal_get_cycles();
thread_t    * this       = CURRENT_THREAD;
cxy_t         mapper_cxy = GET_CXY( mapper_xp );
mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
vfs_inode_t * inode_ptr  = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
xptr_t        inode_xp   = XPTR( mapper_cxy , inode_ptr );
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_get_name( inode_xp , name );
if( DEBUG_MAPPER_MOVE_USER < cycle )
{
    if( to_buffer )
    printk("\n[%s] thread[%x,%x] : mapper(%s) -> buffer(%x) / bytes %d / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, name, buffer, size, cycle );
    else
    printk("\n[%s] thread[%x,%x] : buffer(%x) -> mapper(%s) / bytes %d / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, buffer, name, size, cycle );
}
#endif

    // compute indexes of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : mapper(%x,%x) / first_page %d / last_page %d\n",
__FUNCTION__, this->process->pid, this->trdid, mapper_cxy, mapper_ptr, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page
        if      ( first   == last  ) page_bytes = size;
        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : page_id %d / page_offset %d / bytes %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_bytes );
#endif

        // get extended pointer on page descriptor in mapper
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // compute cluster and pointers on page in mapper
        xptr_t     map_xp  = ppm_page2base( page_xp );
        uint8_t  * map_ptr = GET_PTR( map_xp );
        cxy_t      map_cxy = GET_CXY( map_xp );

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : get buffer(%x,%x) in mapper\n",
__FUNCTION__, this->process->pid, this->trdid, map_cxy, map_ptr );
#endif
        // compute pointer in user buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( map_cxy , map_ptr + page_offset , buf_ptr , page_bytes );

#if DEBUG_MAPPER_MOVE_USER & 1
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] moved %d bytes / mapper %s (%x,%x) -> user buffer(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, page_bytes,
name, map_cxy, map_ptr + page_offset, local_cxy, buf_ptr );
#endif

        }
        else
        {
            ppm_page_do_dirty( page_xp );
            hal_copy_from_uspace( map_cxy , map_ptr + page_offset , buf_ptr , page_bytes );

#if DEBUG_MAPPER_MOVE_USER & 1
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] moved %d bytes / user buffer(%x,%x) -> mapper %s (%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, page_bytes,
local_cxy, buf_ptr, name, map_cxy, map_ptr + page_offset );
mapper_display_page( mapper_xp , page_id , 128 );
#endif

        }

        done += page_bytes;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle      = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
{
    if( to_buffer )
    printk("\n[%s] thread[%x,%x] completed mapper(%s) -> buffer(%x) / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, name, buffer, cycle );
    else
    printk("\n[%s] thread[%x,%x] completed buffer(%x) -> mapper(%s) / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, buffer, name, cycle );
}
#endif

    return 0;

}  // end mapper_move_user()

////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
uint32_t      cycle  = (uint32_t)hal_get_cycles();
thread_t    * this   = CURRENT_THREAD;
mapper_t    * mapper = GET_PTR( mapper_xp );
vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  )   page_bytes = size;
        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
{
    if( to_buffer )
    printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
    else
    printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
}
#endif

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_bytes );

        done += page_bytes;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()

///////////////////////////////////////////////////
error_t mapper_remote_get_32( xptr_t     mapper_xp,
                              uint32_t   page_id,
                              uint32_t   word_id,
                              uint32_t * value )
{
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )  return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // get the value from mapper
    *value = hal_remote_l32( base_xp + (word_id<<2) );

    return 0;

}  // end mapper_remote_get_32()

///////////////////////////////////////////////////
error_t mapper_remote_set_32( xptr_t     mapper_xp,
                              uint32_t   page_id,
                              uint32_t   word_id,
                              uint32_t   value )
{
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // set value to mapper
    hal_remote_s32( (base_xp + (word_id << 2)) , value );

    // set the dirty flag in page descriptor
    ppm_page_do_dirty( page_xp );

    return 0;

}  // end mapper_remote_set_32()

/////////////////////////////////////////
error_t mapper_sync( mapper_t *  mapper )
{
    page_t   * page;                // local pointer on current page descriptor
    xptr_t     page_xp;             // extended pointer on current page descriptor
    grdxt_t  * rt;                  // pointer on radix_tree descriptor
    uint32_t   start_key;           // start page index in mapper
    uint32_t   found_key;           // current page index in mapper
    error_t    error;

#if DEBUG_MAPPER_SYNC
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
char       name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
#endif

    // get pointer on radix tree
    rt = &mapper->rt;

    // initialize loop variable
    start_key = 0;

    // scan radix-tree until last page found
    while( 1 )
    {
        // get page descriptor from radix tree
        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );

        if( page == NULL ) break;

assert( (page->index == found_key ), "wrong page descriptor index" );
assert( (page->order == 0),          "mapper page order must be 0" );

        // build extended pointer on page descriptor
        page_xp = XPTR( local_cxy , page );

        // synchronize page if dirty
        if( (page->flags & PG_DIRTY) != 0 )
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] synchronize page %d of <%s> to IOC device\n",
__FUNCTION__, this->process->pid, this->trdid, page->index, name );
#endif
            // copy page to file system
            error = vfs_fs_move_page( page_xp , IOC_WRITE );

            if( error )
            {
                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
                __FUNCTION__, page->index );
                return -1;
            }

            // remove page from PPM dirty list
            ppm_page_undo_dirty( page_xp );
        }
        else
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
__FUNCTION__, this->process->pid, this->trdid, page->index, name );
#endif
        }

        // update loop variable
        start_key = page->index + 1;
    }  // end while

    return 0;

}  // end mapper_sync()

//////////////////////////////////////////////////
error_t mapper_display_page( xptr_t     mapper_xp,
                             uint32_t   page_id,
                             uint32_t   nbytes )
{
    xptr_t        page_xp;        // extended pointer on page descriptor
    xptr_t        base_xp;        // extended pointer on page base
    char          buffer[4096];   // local buffer
    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
    uint32_t      line;           // line index
    uint32_t      word;           // word index
    cxy_t         mapper_cxy;     // mapper cluster identifier
    mapper_t    * mapper_ptr;     // mapper local pointer
    vfs_inode_t * inode_ptr;      // inode local pointer

    char       name[CONFIG_VFS_MAX_NAME_LENGTH];

    if( nbytes > 4096 )
    {
        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
        __FUNCTION__, nbytes );
        return -1;
    }

    // get extended pointer on page descriptor
    page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )
    {
        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
        __FUNCTION__, page_id );
        return -1;
    }

    // get cluster and local pointer
    mapper_cxy = GET_CXY( mapper_xp );
    mapper_ptr = GET_PTR( mapper_xp );

    // get inode
    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );

    // get inode name
    if( inode_ptr == NULL ) strcpy( name , "fat" );
    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );

    // get extended pointer on page base
    base_xp = ppm_page2base( page_xp );

    // copy remote page to local buffer
    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );

    // display 8 words per line
    tabi = (uint32_t *)buffer;
    printk("\n***** mapper <%s> / %d bytes in page %d (%x,%x)\n",
    name, nbytes, page_id, GET_CXY(base_xp), GET_PTR(base_xp) );
    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    {
        printk("%X : ", line << 5 );
        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
        printk("\n");
    }

    return 0;

}  // end mapper_display_page()


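As a usage note for the word-level accessors above: a FAT32 driver can read one FAT entry through the mapper without touching the disk when the corresponding page is already cached. The sketch below is only an illustration, not code from this file; the fat_get_next_cluster() helper and the FAT layout assumptions are hypothetical, while mapper_remote_get_32() and CONFIG_PPM_PAGE_SIZE come from mapper.c itself.

    // Illustrative sketch only: this helper is not part of mapper.c.
    // Each mapper page is assumed to hold (CONFIG_PPM_PAGE_SIZE / 4) 32-bit FAT entries.
    static error_t fat_get_next_cluster( xptr_t     fat_mapper_xp,
                                         uint32_t   cluster,
                                         uint32_t * next )
    {
        uint32_t entries_per_page = CONFIG_PPM_PAGE_SIZE >> 2;
        uint32_t page_id          = cluster / entries_per_page;
        uint32_t word_id          = cluster % entries_per_page;

        // read the FAT entry through the mapper (the page is loaded on a miss)
        return mapper_remote_get_32( fat_mapper_xp , page_id , word_id , next );
    }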