source: trunk/kernel/mm/mapper.c @ 676

Last change on this file since 676 was 672, checked in by alain, 4 years ago

1) Introduce up to 4 command-line arguments in the KSH "load" command.
These arguments are transferred to the user process through the
argc/argv mechanism, using the user space "args" vseg.

2) Introduce the named and anonymous "pipes", for inter-process communication
through the pipe() and mkfifo() syscalls.

3) Introduce the "chat" application to validate the two mechanisms above.

4) Improve the printk() and assert() functions in printk.c.
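For reference, a minimal user-level sketch of the two new IPC calls, assuming the usual
POSIX-style prototypes (the actual ALMOS-MKH syscall interface and the "chat" application
code may differ; the FIFO path name below is only an example):

    #include <unistd.h>     // pipe(), read(), write()
    #include <sys/stat.h>   // mkfifo()
    #include <fcntl.h>      // open()
    #include <stdio.h>

    int main( void )
    {
        // anonymous pipe : fd[0] is the read end, fd[1] is the write end
        int  fd[2];
        char buf[8];
        if( pipe( fd ) ) { printf("cannot create pipe\n"); return -1; }
        write( fd[1] , "hello" , 5 );
        read ( fd[0] , buf , 5 );

        // named pipe : created in the file system, then opened like a regular file
        if( mkfifo( "/home/fifo" , 0666 ) ) { printf("cannot create fifo\n"); return -1; }
        int fifo = open( "/home/fifo" , O_WRONLY );   // blocks until a reader opens the FIFO
        write( fifo , buf , 5 );

        return 0;
    }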

File size: 31.4 KB
/*
 * mapper.c - Implementation of the kernel cache for FS files and directories.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018,2019,2020)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <hal_vmm.h>
#include <grdxt.h>
#include <string.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>
#include <dev_ioc.h>


/////////////////////////////////////
xptr_t  mapper_create( cxy_t     cxy,
                       uint32_t  type )
{
    mapper_t * mapper_ptr;
    kmem_req_t req;
    error_t    error;

    // allocate memory for mapper descriptor
    req.type    = KMEM_KCM;
    req.order   = bits_log2( sizeof(mapper_t) );
    req.flags   = AF_KERNEL | AF_ZERO;
    mapper_ptr  = kmem_remote_alloc( cxy , &req );

    if( mapper_ptr == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return XPTR_NULL;
    }

    // initialize refcount and type
    hal_remote_s32( XPTR( cxy , &mapper_ptr->refcount ) , 0 );
    hal_remote_s32( XPTR( cxy , &mapper_ptr->fs_type )  , type );

    // initialize radix tree
    error = grdxt_remote_init( XPTR( cxy , &mapper_ptr->rt ),
                               CONFIG_MAPPER_GRDXT_W1,
                               CONFIG_MAPPER_GRDXT_W2,
                               CONFIG_MAPPER_GRDXT_W3 );
    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_KCM;
        req.ptr   = mapper_ptr;
        kmem_remote_free( cxy , &req );
        return XPTR_NULL;
    }

    // initialize mapper lock
    remote_rwlock_init( XPTR( cxy , &mapper_ptr->lock ) , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( cxy , &mapper_ptr->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( cxy , &mapper_ptr->vsegs_root ) );

    return XPTR( cxy , mapper_ptr );

}  // end mapper_create()

////////////////////////////////////////
void mapper_destroy( xptr_t  mapper_xp )
{
    xptr_t     page_xp;
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;

    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );

    // build extended pointer on radix tree
    xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );

    // scan radix tree
    do
    {
        // get page from radix tree
        page_xp = grdxt_remote_get_first( rt_xp,
                                          start_index ,
                                          &found_index );
        page = GET_PTR( page_xp );

        // release registered pages to PPM
        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            mapper_remote_release_page( mapper_xp , page );

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix tree itself
    grdxt_remote_destroy( rt_xp );

    // release memory for mapper descriptor
    req.type = KMEM_KCM;
    req.ptr  = mapper_ptr;
    kmem_remote_free( mapper_cxy , &req );

}  // end mapper_destroy()

/////////////////////////////////////////////////
error_t mapper_handle_miss( xptr_t     mapper_xp,
                            uint32_t   page_id,
                            xptr_t   * page_xp_ptr )
{
    error_t    error;

    uint32_t   inode_size = 0;
    uint32_t   inode_type = 0;

    thread_t * this = CURRENT_THREAD;

    // get target mapper cluster and local pointer
    cxy_t         mapper_cxy = GET_CXY( mapper_xp );
    mapper_t    * mapper_ptr = GET_PTR( mapper_xp );

    // get inode pointer
    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );

    // get inode size and type if relevant
    if( inode != NULL )
    {
        inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
        inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
    }

#if DEBUG_MAPPER_HANDLE_MISS
uint32_t      cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
{
    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cxy %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle );
}
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
{
    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cxy %x / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle );
}
#endif

#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
{
    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
}
#endif

    // allocate one 4 Kbytes page from the remote mapper cluster
    xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
    page_t * page_ptr = GET_PTR( page_xp );

    if( page_xp == XPTR_NULL )
    {
        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
        __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );
        return -1;
    }

    // initialize the page descriptor
    page_remote_init( page_xp );

    // initialize specific page descriptor fields
    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1          );
    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index )    , page_id    );
    hal_remote_spt( XPTR( mapper_cxy , &page_ptr->mapper )   , mapper_ptr );
    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->flags )    , PG_INIT    );

    // insert page in mapper radix tree
    error = grdxt_remote_insert( XPTR( mapper_cxy , &mapper_ptr->rt),
                                 page_id,
                                 page_ptr );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        ppm_remote_free_pages( mapper_cxy , page_ptr );
        return -1;
    }

    // launch I/O operation to load page from IOC device when required:
    // - it is the FAT mapper
    // - it is a directory mapper
    // - it is a file mapper, and there is data on the IOC device for this page
    if( (inode == NULL) || (inode_type == FILE_TYPE_DIR) || (inode_size > (page_id << 10) ) )
    {
        error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );

        if( error )
        {
            printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
            __FUNCTION__ , this->process->pid, this->trdid );
            mapper_remote_release_page( mapper_xp , page_ptr );
            return -1;
        }
    }

    // return extended pointer on allocated page
    *page_xp_ptr = page_xp;

#if DEBUG_MAPPER_HANDLE_MISS
ppn_t ppn = ppm_page2ppn( page_xp );
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
{
    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / page %x / ppn %x\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, name, page_ptr, ppn );
}
if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
{
    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / page %x / ppn %x\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, page_ptr, ppn );
}
#endif

#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
{
    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
}
#endif

    return 0;

}  // end mapper_handle_miss()

/////////////////////////////////////////////
xptr_t  mapper_get_page( xptr_t    mapper_xp,
                         uint32_t  page_id )
{
    error_t       error;

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );

assert( __FUNCTION__, (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ),
"should not be used for the FAT mapper");

#if DEBUG_MAPPER_GET_PAGE
uint32_t      cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
if( DEBUG_MAPPER_GET_PAGE < cycle )
{
    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
    printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
}
#endif

#if( DEBUG_MAPPER_GET_PAGE & 2 )
if( DEBUG_MAPPER_GET_PAGE < cycle )
ppm_remote_display( local_cxy );
#endif

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // second check for the missing page, because the page status may have been
        // modified by another thread while the lock was released between READ_MODE
        // and WRITE_MODE. From this point, there are no concurrent accesses to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {
            error = mapper_handle_miss( mapper_xp,
                                        page_id,
                                        &page_xp );
            if( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] introduced missing page %d in <%s> mapper / ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
#endif

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
#endif

#if( DEBUG_MAPPER_GET_PAGE & 2)
if( DEBUG_MAPPER_GET_PAGE < cycle )
ppm_remote_display( local_cxy );
#endif

    return page_xp;

}  // end mapper_get_page()

/////////////////////////////////////////////////
xptr_t  mapper_get_fat_page( xptr_t    mapper_xp,
                             uint32_t  page_id )
{
    error_t       error;

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );

assert( __FUNCTION__, (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ),
"should be used for the FAT mapper");

#if DEBUG_MAPPER_GET_FAT_PAGE
uint32_t      cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
#endif

#if( DEBUG_MAPPER_GET_FAT_PAGE & 2 )
if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
ppm_remote_display( local_cxy );
#endif

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // second check for the missing page, because the page status may have been
        // modified by another thread while the lock was released between READ_MODE
        // and WRITE_MODE. From this point, there are no concurrent accesses to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {
            error = mapper_handle_miss( mapper_xp,
                                        page_id,
                                        &page_xp );
            if( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

#if (DEBUG_MAPPER_GET_FAT_PAGE & 1)
if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
printk("\n[%s] thread[%x,%x] introduced missing page %d in FAT mapper / ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
#endif

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_FAT_PAGE
if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
#endif

#if( DEBUG_MAPPER_GET_FAT_PAGE & 2)
if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
ppm_remote_display( local_cxy );
#endif

    return page_xp;

}  // end mapper_get_fat_page()

////////////////////////////////////////////////////
void mapper_remote_release_page( xptr_t   mapper_xp,
                                 page_t * page )
{
    // get mapper cluster and local pointer
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );

    // build extended pointer on mapper lock
    xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );

    // take mapper lock in WRITE_MODE
    remote_rwlock_wr_acquire( lock_xp );

    // remove physical page from radix tree
    grdxt_remote_remove( XPTR( mapper_cxy , &mapper_ptr->rt ) , page->index );

    // release mapper lock from WRITE_MODE
    remote_rwlock_wr_release( lock_xp );

    // release page to PPM
    ppm_remote_free_pages( mapper_cxy , page );

}  // end mapper_remote_release_page()

///////////////////////////////////////////////
error_t mapper_move_user( xptr_t     mapper_xp,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t      cycle      = (uint32_t)hal_get_cycles();
thread_t    * this       = CURRENT_THREAD;
cxy_t         mapper_cxy = GET_CXY( mapper_xp );
mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
vfs_inode_t * inode_ptr  = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
xptr_t        inode_xp   = XPTR( mapper_cxy , inode_ptr );
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_get_name( inode_xp , name );
if( DEBUG_MAPPER_MOVE_USER < cycle )
{
    if( to_buffer )
    printk("\n[%s] thread[%x,%x] : mapper(%s) -> buffer(%x) / bytes %d / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, name, buffer, size, cycle );
    else
    printk("\n[%s] thread[%x,%x] : buffer(%x) -> mapper(%s) / bytes %d / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, buffer, name, size, cycle );
}
#endif

    // compute indexes of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : mapper(%x,%x) / first_page %d / last_page %d\n",
__FUNCTION__, this->process->pid, this->trdid, mapper_cxy, mapper_ptr, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page
        if      ( first   == last  ) page_bytes = size;
        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : page_id %d / page_offset %d / bytes %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id , page_offset , page_bytes );
#endif

        // get extended pointer on page descriptor in mapper
        page_xp = mapper_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // compute extended pointer on kernel mapper
        xptr_t     map_xp  = ppm_page2base( page_xp ) + page_offset;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : get buffer(%x,%x) in mapper\n",
__FUNCTION__, this->process->pid, this->trdid, GET_CXY(map_xp), GET_PTR(map_xp) );
#endif
        // compute pointer in user buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_xp , page_bytes );

#if DEBUG_MAPPER_MOVE_USER & 1
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] moved %d bytes / mapper %s (%x,%x) -> user buffer(%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, page_bytes,
name, GET_CXY(map_xp), GET_PTR(map_xp), local_cxy, buf_ptr );
#endif

        }
        else
        {
            ppm_page_do_dirty( page_xp );
            hal_copy_from_uspace( map_xp , buf_ptr , page_bytes );

#if DEBUG_MAPPER_MOVE_USER & 1
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] moved %d bytes / user buffer(%x,%x) -> mapper %s (%x,%x)\n",
__FUNCTION__, this->process->pid, this->trdid, page_bytes,
local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
mapper_display_page(  mapper_xp , page_id , 128 );
#endif

        }

        done += page_bytes;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle      = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
{
    if( to_buffer )
    printk("\n[%s] thread[%x,%x] completed mapper(%s) -> buffer(%x) / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, name, buffer, cycle );
    else
    printk("\n[%s] thread[%x,%x] completed buffer(%x) -> mapper(%s) / cycle %d\n",
    __FUNCTION__, this->process->pid, this->trdid, buffer, name, cycle );
}
#endif

    return 0;

}  // end mapper_move_user()

////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_bytes;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
uint32_t      cycle  = (uint32_t)hal_get_cycles();
thread_t    * this   = CURRENT_THREAD;
mapper_t    * mapper = GET_PTR( mapper_xp );
vfs_inode_t * inode  = hal_remote_lpt( XPTR( mapper_cxy , &mapper->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / %d bytes / offset %d / mapper <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, size, file_offset, name, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size -1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  )   page_bytes = size;
        else if ( page_id == first ) page_bytes = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_bytes = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_bytes = CONFIG_PPM_PAGE_SIZE;

        // get extended pointer on page descriptor
        page_xp = mapper_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page_xp );
        }

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
{
    if( to_buffer )
    printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",
    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
    else
    printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",
    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
}
#endif

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_bytes );

        done += page_bytes;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle  = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / mapper <%s> / buffer (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, name, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()

///////////////////////////////////////////////////
error_t mapper_remote_get_32( xptr_t     mapper_xp,
                              uint32_t   page_id,
                              uint32_t   word_id,
                              uint32_t * value )
{
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page containing the searched word
    page_xp  = mapper_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )  return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // get the value from mapper
    *value = hal_remote_l32( base_xp + (word_id<<2) );

    return 0;

}  // end mapper_remote_get_32()

///////////////////////////////////////////////////
error_t mapper_remote_set_32( xptr_t     mapper_xp,
                              uint32_t   page_id,
                              uint32_t   word_id,
                              uint32_t   value )
{
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page containing the searched word
    page_xp  = mapper_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // set value to mapper
    hal_remote_s32( (base_xp + (word_id << 2)) , value );

    // set the dirty flag in page descriptor
    ppm_page_do_dirty( page_xp );

    return 0;

}  // end mapper_remote_set_32()

////////////////////////////////////////
error_t mapper_sync( xptr_t  mapper_xp )
{
    uint32_t   found_key;           // unused, required by grdxt_remote_get_first()
    error_t    error;

    // get mapper cluster and local pointer
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_SYNC
thread_t * this  = CURRENT_THREAD;
uint32_t   cycle = (uint32_t)hal_get_cycles();
char       name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
#endif

    // build extended pointer on radix tree
    xptr_t   rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );

    // initialise loop variable
    uint32_t start_key = 0;

    // scan radix-tree until last page found
    while( 1 )
    {
        // get page descriptor from radix tree
        xptr_t page_xp = grdxt_remote_get_first( rt_xp , start_key , &found_key );

        page_t * page_ptr = GET_PTR( page_xp );

        // exit loop when last page found
        if( page_ptr == NULL ) break;

        // get page flags & index fields
        uint32_t flags = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->flags ) );
        uint32_t index = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->index ) );

        // synchronize page if dirty
        if( flags & PG_DIRTY )
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] synchronizes page %d of <%s> to IOC device\n",
__FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
#endif
            // copy page to file system
            error = vfs_fs_move_page( page_xp , IOC_WRITE );

            if( error )
            {
                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
                __FUNCTION__, page_ptr->index );
                return -1;
            }

            // remove page from PPM dirty list
            ppm_page_undo_dirty( page_xp );
        }
        else
        {

#if DEBUG_MAPPER_SYNC
if( cycle > DEBUG_MAPPER_SYNC )
printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
__FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
#endif
        }

        // update loop variable
        start_key = index + 1;
    }  // end while

    return 0;

}  // end mapper_sync()

///////////////////////////////////////////////
void mapper_display_page( xptr_t     mapper_xp,
                          uint32_t   page_id,
                          uint32_t   nbytes )
{
    char          buffer[4096];   // local buffer
    uint32_t      line;           // line index
    uint32_t      word;           // word index

    char       name[CONFIG_VFS_MAX_NAME_LENGTH];

assert( __FUNCTION__, (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
assert( __FUNCTION__, (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");

    // get mapper cluster and local pointer
    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );

    // get extended pointer on page descriptor
    xptr_t page_xp = mapper_get_page( mapper_xp , page_id );

    // get page cluster and local pointer
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    // get page_id and mapper from page descriptor
    uint32_t   index   = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
    mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );

assert( __FUNCTION__, (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
assert( __FUNCTION__, (mapper_ptr == mapper   ) , "inconsistent mapper field in page descriptor");
assert( __FUNCTION__, (page_id    == index    ) , "inconsistent index field in page descriptor");

    // get inode
    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );

    // get inode name
    if( inode_ptr == NULL ) strcpy( name , "FAT" );
    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );

    // get extended pointer on page base
    xptr_t base_xp = ppm_page2base( page_xp );

    // copy remote page to local buffer
    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );

    // display header
    uint32_t * tabi = (uint32_t *)buffer;
    printk("\n***** mapper <%s> / page_id %d / cxy %x / mapper %x / buffer %x\n",
    name, page_id, mapper_cxy, mapper_ptr, GET_PTR( base_xp ) );

    // display 8 words per line
    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    {
        printk("%X : ", line << 5 );
        for( word = 0 ; word < 8 ; word++ ) printk("%X ", tabi[(line<<3) + word] );
        printk("\n");
    }

}  // end mapper_display_page()
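
To illustrate how the mapper API above is typically used by a caller, here is a hedged sketch
of a read path copying file data from an inode mapper to a user buffer, and of a FAT lookup
going through the FAT mapper. The helper names (vfs_read_sketch, fat_next_cluster_sketch) and
the 1024-entries-per-page assumption (4 Kbytes pages, 32-bit FAT entries) are illustrative
only; the mapper_*, ppm_* and hal_* calls are the ones defined or used in this file.

    // hypothetical read() path : copy "size" bytes from an inode mapper to a user buffer ;
    // missing pages are loaded on demand by mapper_get_page() / mapper_handle_miss()
    error_t vfs_read_sketch( xptr_t     mapper_xp,     // extended pointer on inode mapper
                             uint32_t   file_offset,   // first byte to read in file
                             void     * u_buf,         // user buffer virtual address
                             uint32_t   size )         // number of bytes to read
    {
        // to_buffer == true : data moves from mapper to user buffer
        return mapper_move_user( mapper_xp , true , file_offset , u_buf , size );
    }

    // hypothetical FAT lookup : read one 32-bit FAT entry through the FAT mapper,
    // assuming 4 Kbytes pages, i.e. 1024 entries per page
    error_t fat_next_cluster_sketch( xptr_t     fat_mapper_xp,
                                     uint32_t   cluster_id,
                                     uint32_t * next_cluster )
    {
        uint32_t page_id = cluster_id >> 10;      // 1024 entries per 4 Kbytes page
        uint32_t word_id = cluster_id & 0x3FF;    // entry index inside the page

        // get the FAT page (loaded from the IOC device on a miss)
        xptr_t page_xp = mapper_get_fat_page( fat_mapper_xp , page_id );
        if( page_xp == XPTR_NULL ) return -1;

        // read the entry from the page base
        *next_cluster = hal_remote_l32( ppm_page2base( page_xp ) + (word_id << 2) );
        return 0;
    }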