source: trunk/kernel/mm/mapper.c @ 623

Last change on this file since 623 was 623, checked in by alain, 6 years ago

Introduce three new types of vsegs (KCODE,KDATA,KDEV)
to map the kernel vsegs in the process VSL and GPT.
This is now used by both the TSAR and the I86 architectures.

File size: 25.0 KB
/*
 * mapper.c - Kernel cache for FS files or directories implementation.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018,2019)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
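
/////////////////////////////////////////////////////////////////////////////////////
// Implementation notes (summary of the functions defined below):
// - A mapper is a per-file (or per-directory) kernel cache. The cached pages are
//   registered in a local radix tree (grdxt), indexed by the page index in the file.
// - All accesses to the radix tree are protected by a remote read/write lock.
// - On a miss, mapper_remote_get_page() allocates a page in the cluster containing
//   the mapper, and loads it from the file system (mapper_handle_miss), either
//   locally or through an RPC.
// - Pages modified by mapper_move_user(), mapper_move_kernel(), or
//   mapper_remote_set_32() are marked dirty, and mapper_sync() writes all dirty
//   pages back to the device.
/////////////////////////////////////////////////////////////////////////////////////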

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <string.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>
#include <dev_ioc.h>


//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->rt,
                        CONFIG_MAPPER_GRDXT_W1,
                        CONFIG_MAPPER_GRDXT_W2,
                        CONFIG_MAPPER_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()
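
// The sketch below is an illustrative example (not compiled in) of how a file
// system could pair mapper_create() with an inode: the inode->mapper field linkage,
// the fs_type value, and the error handling policy are assumptions, not code taken
// from the VFS.
#if 0
static error_t example_inode_cache_init( vfs_inode_t * inode,
                                         vfs_fs_type_t fs_type )
{
    // create a mapper in the local cluster for this inode
    mapper_t * mapper = mapper_create( fs_type );

    if( mapper == NULL ) return -1;

    // link mapper and inode both ways (assumed inode->mapper field)
    mapper->inode = inode;
    inode->mapper = mapper;

    return 0;
}
#endif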

////////////////////////////////////////
void mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;

    // scan radix tree
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );

        // release registered pages to PPM
        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            mapper_release_page( mapper , page );

            // update start_index value for next search
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix tree itself
    grdxt_destroy( &mapper->rt );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

}  // end mapper_destroy()

////////////////////////////////////////////////////
xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
                                uint32_t  page_id )
{
    error_t       error;
    mapper_t    * mapper_ptr;
    cxy_t         mapper_cxy;
    xptr_t        lock_xp;        // extended pointer on mapper lock
    xptr_t        page_xp;        // extended pointer on searched page descriptor
    xptr_t        rt_xp;          // extended pointer on radix tree in mapper

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_ptr = GET_PTR( mapper_xp );
    mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
#endif

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => try to handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // check the missing page again : its status can have been modified by
        // another thread while the lock was released between READ_MODE and WRITE_MODE.
        // From this point there are no concurrent accesses to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {

            if( mapper_cxy == local_cxy )   // mapper is local
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ );
#endif
                 error = mapper_handle_miss( mapper_ptr,
                                             page_id,
                                             &page_xp );
            }
            else
            {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ );
#endif
                 rpc_mapper_handle_miss_client( mapper_cxy,
                                                mapper_ptr,
                                                page_id,
                                                &page_xp,
                                                &error );
            }

            if ( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
page_id, name, ppm_page2ppn( page_xp ), cycle );
#endif

    return page_xp;

}  // end mapper_remote_get_page()
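
// Illustrative sketch (not compiled in) : typical use of mapper_remote_get_page()
// to read one 32-bit word from a cached file page. It duplicates the logic of
// mapper_remote_get_32() defined below, and assumes 4 Kbytes pages (1024 words).
#if 0
static error_t example_read_word( xptr_t     mapper_xp,
                                  uint32_t   word_id,
                                  uint32_t * value )
{
    // get extended pointer on the page containing the word (loaded on a miss)
    xptr_t page_xp = mapper_remote_get_page( mapper_xp , word_id >> 10 );

    if( page_xp == XPTR_NULL ) return -1;

    // compute extended pointer on the page base, then read the word remotely
    xptr_t base_xp = ppm_page2base( page_xp );
    *value = hal_remote_l32( base_xp + ((word_id & 0x3FF) << 2) );

    return 0;
}
#endif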

//////////////////////////////////////////////
error_t mapper_handle_miss( mapper_t * mapper,
                            uint32_t   page_id,
                            xptr_t   * page_xp )
{
    kmem_req_t   req;
    page_t     * page;
    error_t      error;

    thread_t * this = CURRENT_THREAD;

#if DEBUG_MAPPER_HANDLE_MISS
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = mapper->inode;
vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] enter for page %d in <%s> / cycle %d",
__FUNCTION__, page_id, name, cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
#endif

    // allocate one page from the local cluster
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_NONE;
    page = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
        return -1;
    }

    // initialize the page descriptor
    page_init( page );
    page_set_flag( page , PG_INIT );
    page_refcount_up( page );
    page->mapper = mapper;
    page->index  = page_id;

    // insert page in mapper radix tree
    error = grdxt_insert( &mapper->rt , page_id , page );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
        __FUNCTION__ , this->process->pid, this->trdid );

        // the page was not registered in the radix tree => release it directly to PPM
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free( &req );
        return -1;
    }

    // launch I/O operation to load page from device to mapper
    error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
        __FUNCTION__ , this->process->pid, this->trdid );

        // remove page from radix tree and release it to PPM
        // (mapper_release_page() already releases the page to the PPM)
        mapper_release_page( mapper , page );
        return -1;
    }

    // set extended pointer on allocated page
    *page_xp = XPTR( local_cxy , page );

#if DEBUG_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( XPTR( local_cxy , &mapper->rt ) , name );
#endif

    return 0;

}  // end mapper_handle_miss()

////////////////////////////////////////////
void mapper_release_page( mapper_t * mapper,
                          page_t   * page )
{
    // build extended pointer on mapper lock
    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );

    // take mapper lock in WRITE_MODE
    remote_rwlock_wr_acquire( mapper_lock_xp );

    // remove physical page from radix tree
    grdxt_remove( &mapper->rt , page->index );

    // release mapper lock from WRITE_MODE
    remote_rwlock_wr_release( mapper_lock_xp );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

}  // end mapper_release_page()

///////////////////////////////////////////////
error_t mapper_move_user( xptr_t     mapper_xp,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
to_buffer, buffer, size, file_offset, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : first_page %d / last_page %d\n",
__FUNCTION__, this->process->pid, this->trdid, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page
        if      ( first   == last  ) page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : page_id = %d / page_offset = %d / page_count = %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : get page (%x,%x) from mapper\n",
__FUNCTION__, this->process->pid, this->trdid, GET_CXY(page_xp), GET_PTR(page_xp) );
#endif

        // compute pointer in mapper
        xptr_t    base_xp = ppm_page2base( page_xp );
        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page_xp );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );

#if (DEBUG_MAPPER_MOVE_USER & 1)
putb(" in mapper_move_user()" , map_ptr , page_count );
#endif
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    return 0;

}  // end mapper_move_user()
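
// Illustrative sketch (not compiled in) : how a read() system call path could use
// mapper_move_user() to copy "size" bytes from the file cache to a user buffer.
// The way the caller obtains mapper_xp and file_offset, and the "true" / "false"
// bool_t constants, are assumptions.
#if 0
static error_t example_file_read( xptr_t     mapper_xp,     // mapper of the file
                                  uint32_t   file_offset,   // current file offset
                                  void     * user_buffer,   // user space buffer
                                  uint32_t   size )         // number of bytes
{
    // to_buffer == true : move bytes from mapper to user buffer;
    // a write() path would pass false, and the touched pages would be marked dirty
    return mapper_move_user( mapper_xp,
                             true,
                             file_offset,
                             user_buffer,
                             size );
}
#endif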

////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  )   page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] src_cxy %x / src_ptr %x / dst_cxy %x / dst_ptr %x\n",
__FUNCTION__, src_cxy, src_ptr, dst_cxy, dst_ptr );
#endif

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
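
// Illustrative sketch (not compiled in) : using mapper_move_kernel() to copy a
// small record from a file (for instance a directory entry) into a kernel buffer
// located in the calling cluster. The record size and offset are arbitrary, and the
// "true" bool_t constant is an assumption.
#if 0
static error_t example_load_record( xptr_t    mapper_xp,    // file mapper
                                    uint32_t  file_offset,  // record offset in file
                                    void    * kernel_buf,   // local kernel buffer
                                    uint32_t  record_size ) // record size in bytes
{
    // build an extended pointer on the local kernel buffer
    xptr_t buffer_xp = XPTR( local_cxy , kernel_buf );

    // to_buffer == true : move bytes from mapper to kernel buffer
    return mapper_move_kernel( mapper_xp,
                               true,
                               file_offset,
                               buffer_xp,
                               record_size );
}
#endif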

///////////////////////////////////////////////////
error_t mapper_remote_get_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t * p_value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )  return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // get the value from mapper
    *p_value = hal_remote_l32( base_xp + (local_id<<2) );

    return 0;

}  // end mapper_remote_get_32()

///////////////////////////////////////////////////
error_t mapper_remote_set_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t   value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // set value to mapper
    hal_remote_s32( (base_xp + (local_id << 2)) , value );

    // set the dirty flag
    ppm_page_do_dirty( page_xp );

    return 0;

}  // end mapper_remote_set_32()
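
// Illustrative sketch (not compiled in) : read-modify-write of one 32-bit word in a
// mapper with mapper_remote_get_32() / mapper_remote_set_32(). A typical client is
// the FAT mapper (see the "fat" name used in mapper_display_page() below), but the
// update rule shown here is arbitrary.
#if 0
static error_t example_update_word( xptr_t   mapper_xp,
                                    uint32_t word_id,
                                    uint32_t mask )
{
    uint32_t value;

    // read current value (loads the page on a miss)
    if( mapper_remote_get_32( mapper_xp , word_id , &value ) ) return -1;

    // write back the modified value; the page is marked dirty by the function
    if( mapper_remote_set_32( mapper_xp , word_id , value | mask ) ) return -1;

    return 0;
}
#endif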

/////////////////////////////////////////
error_t mapper_sync( mapper_t *  mapper )
{
    page_t   * page;                // local pointer on current page descriptor
    xptr_t     page_xp;             // extended pointer on current page descriptor
    grdxt_t  * rt;                  // pointer on radix_tree descriptor
    uint32_t   start_key;           // start page index in mapper
    uint32_t   found_key;           // current page index in mapper
    error_t    error;

#if DEBUG_MAPPER_SYNC
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
char       name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
#endif

    // get pointer on radix tree
    rt        = &mapper->rt;

    // initialize loop variable
    start_key = 0;

    // scan radix-tree until last page found
    while( 1 )
    {
        // get page descriptor from radix tree
        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );

        if( page == NULL ) break;

assert( (page->index == found_key ), __FUNCTION__, "wrong page descriptor index" );
assert( (page->order == 0),          __FUNCTION__, "mapper page order must be 0" );

        // build extended pointer on page descriptor
        page_xp = XPTR( local_cxy , page );

        // synchronize page if dirty
        if( (page->flags & PG_DIRTY) != 0 )
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] synchronize page %d of <%s> to device\n",
__FUNCTION__, this->process->pid, this->trdid, page->index, name );
#endif
            // copy page to file system
            error = vfs_fs_move_page( page_xp , IOC_WRITE );

            if( error )
            {
                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
                __FUNCTION__, page->index );
                return -1;
            }

            // remove page from PPM dirty list
            ppm_page_undo_dirty( page_xp );
        }
        else
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
__FUNCTION__, this->process->pid, this->trdid, page->index, name );
#endif
        }

        // update loop variable
        start_key = page->index + 1;
    }  // end while

    return 0;

}  // end mapper_sync()
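
// Illustrative sketch (not compiled in) : forcing a file to the device, as a
// close() or sync() path could do. It only covers the local case, where the mapper
// is in the calling cluster; a remote mapper would require an RPC, not shown here.
#if 0
static error_t example_flush_file( mapper_t * mapper )
{
    // write all dirty pages of this mapper back to the file system
    if( mapper_sync( mapper ) )
    {
        printk("\n[ERROR] in %s : cannot synchronize mapper\n", __FUNCTION__ );
        return -1;
    }

    return 0;
}
#endif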

//////////////////////////////////////////////////
error_t mapper_display_page( xptr_t     mapper_xp,
                             uint32_t   page_id,
                             uint32_t   nbytes )
{
    xptr_t        page_xp;        // extended pointer on page descriptor
    xptr_t        base_xp;        // extended pointer on page base
    char          buffer[4096];   // local buffer
    uint32_t    * tabi;           // pointer on uint32_t to scan buffer
    char        * tabc;           // pointer on char to scan buffer
    uint32_t      line;           // line index
    uint32_t      word;           // word index
    uint32_t      n;              // char index
    cxy_t         mapper_cxy;     // mapper cluster identifier
    mapper_t    * mapper_ptr;     // mapper local pointer
    vfs_inode_t * inode_ptr;      // inode local pointer

    char       name[CONFIG_VFS_MAX_NAME_LENGTH];

    if( nbytes > 4096 )
    {
        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
        __FUNCTION__, nbytes );
        return -1;
    }

    // get extended pointer on page descriptor
    page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )
    {
        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
        __FUNCTION__, page_id );
        return -1;
    }

    // get cluster and local pointer
    mapper_cxy = GET_CXY( mapper_xp );
    mapper_ptr = GET_PTR( mapper_xp );

    // get inode
    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );

    // get inode name
    if( inode_ptr == NULL ) strcpy( name , "fat" );
    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );

    // get extended pointer on page base
    base_xp = ppm_page2base( page_xp );

    // copy remote page to local buffer
    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );

    // display 8 words per line
    tabi = (uint32_t *)buffer;
    tabc = (char *)buffer;
    printk("\n***** <%s> first %d bytes of page %d *****\n", name, nbytes, page_id );
    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    {
        printk("%X : ", line );
        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
        printk(" | ");
        for( n = 0 ; n < 32 ; n++ ) printk("%c", tabc[(line<<5) + n] );
        printk("\n");
    }

    return 0;

}  // end mapper_display_page()
