source: trunk/kernel/mm/mapper.c @ 606

Last change on this file since 606 was 606, checked in by alain, 6 years ago

Improve the FAT32 file system to support cat, rm, cp commands.

File size: 19.6 KB
/*
 * mapper.c - Kernel cache implementation for FS files and directories.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->rt,
                        CONFIG_MAPPER_GRDXT_W1,
                        CONFIG_MAPPER_GRDXT_W2,
                        CONFIG_MAPPER_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

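// Illustrative sketch (not part of the original file) : the three width
// parameters passed to grdxt_init() above presumably define a three-level
// radix tree, so the number of pages one mapper can index would be bounded
// as computed below. The helper name is hypothetical.

static inline uint32_t mapper_max_pages_sketch( void )
{
    // assumed capacity : one slot per leaf of the three-level radix tree
    return 1 << ( CONFIG_MAPPER_GRDXT_W1 +
                  CONFIG_MAPPER_GRDXT_W2 +
                  CONFIG_MAPPER_GRDXT_W3 );
}
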
////////////////////////////////////////
void mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;

    // scan radix tree
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );

        // release registered pages to PPM
        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            mapper_release_page( mapper , page );

            // update start_key value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix tree itself
    grdxt_destroy( &mapper->rt );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

}  // end mapper_destroy()

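// Illustrative sketch (not part of the original file) : typical lifecycle of
// a mapper as suggested by the two functions above. The wrapper name is
// hypothetical; real callers live in the VFS code.

static error_t mapper_lifecycle_sketch( vfs_fs_type_t fs_type )
{
    // allocate and initialize an empty mapper for the given FS type
    mapper_t * mapper = mapper_create( fs_type );

    if( mapper == NULL ) return -1;

    // ... the VFS normally links the mapper to an inode here, and threads
    //     then access the cached pages through the functions below ...

    // release all cached pages, the radix tree, and the descriptor itself
    mapper_destroy( mapper );

    return 0;
}
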
////////////////////////////////////////////////////
xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
                                uint32_t  page_id )
{
    error_t       error;
    mapper_t    * mapper_ptr;
    cxy_t         mapper_cxy;
    xptr_t        lock_xp;        // extended pointer on mapper lock
    xptr_t        page_xp;        // extended pointer on searched page descriptor
    xptr_t        rt_xp;          // extended pointer on radix tree in mapper

    thread_t * this = CURRENT_THREAD;

    // get mapper cluster and local pointer
    mapper_ptr = GET_PTR( mapper_xp );
    mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
#endif

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // build extended pointer on mapper lock and mapper rt
    lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );

    // take mapper lock in READ_MODE
    remote_rwlock_rd_acquire( lock_xp );

    // search page in radix tree
    page_xp  = grdxt_remote_lookup( rt_xp , page_id );

    // test mapper miss
    if( page_xp == XPTR_NULL )                  // miss => try to handle it
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // check the missing page again, because another thread may have
        // loaded it while this thread was upgrading the lock from READ_MODE
        // to WRITE_MODE. From this point there are no concurrent accesses
        // to the mapper.
        page_xp = grdxt_remote_lookup( rt_xp , page_id );

        if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
        {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] missing page => load it from IOC device\n", __FUNCTION__ );
#endif
            if( mapper_cxy == local_cxy )   // mapper is local
            {
                 error = mapper_handle_miss( mapper_ptr,
                                             page_id,
                                             &page_xp );
            }
            else
            {
                 rpc_mapper_handle_miss_client( mapper_cxy,
                                                mapper_ptr,
                                                page_id,
                                                &page_xp,
                                                &error );
            }

            if ( error )
            {
                printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
                __FUNCTION__ , this->process->pid, this->trdid );
                remote_rwlock_wr_release( lock_xp );
                return XPTR_NULL;
            }
        }

        // release mapper lock from WRITE_MODE
        remote_rwlock_wr_release( lock_xp );
    }
    else                                              // hit
    {
        // release mapper lock from READ_MODE
        remote_rwlock_rd_release( lock_xp );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
page_id, name, ppm_page2ppn( page_xp ), cycle );
#endif

    return page_xp;

}  // end mapper_remote_get_page()

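// Illustrative sketch (not part of the original file) : how a client could use
// mapper_remote_get_page() to obtain an extended pointer on the base of one
// page of a possibly remote mapper. The function name is hypothetical.

static xptr_t mapper_get_page_base_sketch( xptr_t   mapper_xp,
                                           uint32_t page_id )
{
    // get extended pointer on the page descriptor (loads the page on a miss)
    xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return XPTR_NULL;

    // convert the page descriptor into an extended pointer on the page base
    return ppm_page2base( page_xp );
}
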
//////////////////////////////////////////////
error_t mapper_handle_miss( mapper_t * mapper,
                            uint32_t   page_id,
                            xptr_t   * page_xp )
{
    kmem_req_t   req;
    page_t     * page;
    error_t      error;

    thread_t * this = CURRENT_THREAD;

#if DEBUG_MAPPER_HANDLE_MISS
uint32_t cycle = (uint32_t)hal_get_cycles();
char          name[CONFIG_VFS_MAX_NAME_LENGTH];
vfs_inode_t * inode = mapper->inode;
vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
__FUNCTION__, page_id, name, cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( &mapper->rt , name );
#endif

    // allocate one page from the mapper cluster
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_NONE;
    page = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
        __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
        return -1;
    }

    // initialize the page descriptor
    page_init( page );
    page_set_flag( page , PG_INIT );
    page_refcount_up( page );
    page->mapper = mapper;
    page->index  = page_id;

    // insert page in mapper radix tree
    error = grdxt_insert( &mapper->rt , page_id , page );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        mapper_release_page( mapper , page );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free(&req);
        return -1;
    }

    // launch I/O operation to load page from device to mapper
    error = vfs_fs_move_page( XPTR( local_cxy , page ) , true );

    if( error )
    {
        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
        __FUNCTION__ , this->process->pid, this->trdid );
        mapper_release_page( mapper , page );
        req.ptr  = page;
        req.type = KMEM_PAGE;
        kmem_free( &req );
        return -1;
    }

    // set extended pointer on allocated page
    *page_xp = XPTR( local_cxy , page );

#if DEBUG_MAPPER_HANDLE_MISS
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_HANDLE_MISS < cycle )
printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
__FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
if( DEBUG_MAPPER_HANDLE_MISS & 1 )
grdxt_display( &mapper->rt , name );
#endif

    return 0;

}  // end mapper_handle_miss()

////////////////////////////////////////////
void mapper_release_page( mapper_t * mapper,
                          page_t   * page )
{
    // build extended pointer on mapper lock
    xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );

    // take mapper lock in WRITE_MODE
    remote_rwlock_wr_acquire( mapper_lock_xp );

    // remove physical page from radix tree
    grdxt_remove( &mapper->rt , page->index );

    // release mapper lock from WRITE_MODE
    remote_rwlock_wr_release( mapper_lock_xp );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

}  // end mapper_release_page()

////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    xptr_t     mapper_xp;      // extended pointer on local mapper
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

#if DEBUG_MAPPER_MOVE_USER
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid,
to_buffer, buffer, size, file_offset, cycle );
#endif

    // build extended pointer on mapper
    mapper_xp = XPTR( local_cxy , mapper );

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes in page
        if      ( first   == last  ) page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] page_id = %d / page_offset = %d / page_count = %d\n",
__FUNCTION__ , page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // compute pointer in mapper
        xptr_t    base_xp = ppm_page2base( page_xp );
        uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        uint8_t * buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page_xp );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, cycle );
#endif

    return 0;

}  // end mapper_move_user()

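// Illustrative sketch (not part of the original file) : how a read()-like
// path could use mapper_move_user() to copy <size> bytes starting at
// <file_offset> from a local mapper into a user buffer. Names are hypothetical.

static error_t mapper_read_to_user_sketch( mapper_t * mapper,
                                           uint32_t   file_offset,
                                           void     * user_buffer,
                                           uint32_t   size )
{
    // to_buffer == true : move bytes from the mapper to the user buffer
    return mapper_move_user( mapper , true , file_offset , user_buffer , size );
}
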
////////////////////////////////////////////////
error_t mapper_move_kernel( xptr_t    mapper_xp,
                            bool_t    to_buffer,
                            uint32_t  file_offset,
                            xptr_t    buffer_xp,
                            uint32_t  size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   page_id;        // current mapper page index
    uint32_t   done;           // number of moved bytes
    xptr_t     page_xp;        // extended pointer on current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = GET_PTR( buffer_xp );

    // get mapper cluster
    cxy_t     mapper_cxy = GET_CXY( mapper_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = mapper_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = mapper_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( page_id = first ; page_id <= last ; page_id++ )
    {
        // compute page_offset
        if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                   page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  )   page_count = size;
        else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                         page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , page_id , page_offset , page_count );
#endif

        // get extended pointer on page descriptor
        page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if ( page_xp == XPTR_NULL ) return -1;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( page_xp );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page_xp );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
#endif

    return 0;

}  // end mapper_move_kernel()

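// Illustrative sketch (not part of the original file) : writing a kernel
// buffer located in the local cluster into a possibly remote mapper with
// mapper_move_kernel(). Names are hypothetical.

static error_t mapper_write_from_kernel_sketch( xptr_t    mapper_xp,
                                                uint32_t  file_offset,
                                                uint8_t * kbuf,
                                                uint32_t  size )
{
    // build an extended pointer on the local kernel buffer
    xptr_t buffer_xp = XPTR( local_cxy , kbuf );

    // to_buffer == false : move bytes from the kernel buffer to the mapper,
    // marking the modified pages dirty
    return mapper_move_kernel( mapper_xp , false , file_offset , buffer_xp , size );
}
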
///////////////////////////////////////////////////
error_t mapper_remote_get_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t * p_value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL )  return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // get the value from mapper
    *p_value = hal_remote_l32( base_xp + (local_id<<2) );

    return 0;

}  // end mapper_remote_get_32()

///////////////////////////////////////////////////
error_t mapper_remote_set_32( xptr_t     mapper_xp,
                              uint32_t   word_id,
                              uint32_t   value )
{
    uint32_t   page_id;      // page index in file
    uint32_t   local_id;     // word index in page
    xptr_t     page_xp;      // extended pointer on searched page descriptor
    xptr_t     base_xp;      // extended pointer on searched page base

    // get page index and local word index
    page_id  = word_id >> 10;
    local_id = word_id & 0x3FF;

    // get page containing the searched word
    page_xp  = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;

    // get page base
    base_xp = ppm_page2base( page_xp );

    // set value to mapper
    hal_remote_s32( (base_xp + (local_id << 2)) , value );

    // set the dirty flag
    ppm_page_do_dirty( page_xp );

    return 0;

}  // end mapper_remote_set_32()

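// Illustrative sketch (not part of the original file) : read-modify-write of
// one 32-bit word of a mapper, as a FAT32 cluster-chain update could do it.
// The function name and the OR-ed flag argument are hypothetical.

static error_t mapper_update_word_sketch( xptr_t   mapper_xp,
                                          uint32_t word_id,
                                          uint32_t flag )
{
    uint32_t value;

    // read the current 32-bit word from the mapper
    if( mapper_remote_get_32( mapper_xp , word_id , &value ) ) return -1;

    // write back the modified word and mark the page dirty
    return mapper_remote_set_32( mapper_xp , word_id , value | flag );
}
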