source: trunk/kernel/mm/mapper.c @ 266

Last change on this file since 266 was 265, checked in by alain, 7 years ago

Fix several bugs in VFS.

File size: 11.7 KB
/*
 * mapper.c - Map memory, file or device in process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;
    error_t    error;

    // scan the radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to radix-tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

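/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only, not part of the kernel code): how a file-system
// module could create and destroy the mapper associated to an inode. The <fs_type>
// and <inode> variables are placeholders for the caller's context.
//
//     mapper_t * mapper = mapper_create( fs_type );
//     if( mapper == NULL ) return ENOMEM;          // placeholder error code
//
//     mapper->inode = inode;                       // back-pointer used by the VFS
//
//     // ... access the file through mapper_get_page() / mapper_move_buffer() ...
//
//     if( mapper_destroy( mapper ) )               // flushes all pages to the device
//     {
//         printk("\n[ERROR] cannot destroy mapper\n");
//     }
/////////////////////////////////////////////////////////////////////////////////////
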
////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t    req;
    page_t      * page;
    error_t       error;

    mapper_dmsg("\n[INFO] %s : enters for page %d in mapper %x\n",
                __FUNCTION__ , index , mapper );

    thread_t * this = CURRENT_THREAD;

    // take mapper lock in READ_MODE
    rwlock_rd_lock( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page available in mapper
    if( (page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_unlock( &mapper->lock );
        rwlock_wr_lock( &mapper->lock );

        // second test on missing page, because the page status can have been modified
        // by another thread while passing from READ_MODE to WRITE_MODE.
        // From this point on, there are no concurrent accesses to the mapper.

        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {
            mapper_dmsg("\n[INFO] %s : missing page => load from device\n", __FUNCTION__ );

            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                       __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_unlock( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT );
            page_set_flag( page , PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix, index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                       __FUNCTION__ , this->trdid );
                // the page was never registered in the radix tree and never loaded:
                // simply reset its flags and release it to the PPM
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // update the mapper and index fields in page descriptor,
            // as required by vfs_mapper_move_page()
            page->mapper = mapper;
            page->index  = index;

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page , true );   // to mapper

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                       __FUNCTION__ , this->trdid );
                // remove the not-loaded page from the radix tree,
                // then reset its flags and release it to the PPM
                rwlock_wr_lock( &mapper->lock );
                grdxt_remove( &mapper->radix , index );
                rwlock_wr_unlock( &mapper->lock );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );

        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is being loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );

            // deschedule to wait load completion
            while( 1 )
            {
                // exit waiting loop when the load is completed (INLOAD flag cleared)
                if( page_is_flag( page , PG_INLOAD ) == false ) break;

                // deschedule
                sched_yield();
            }
        }
        else   // page has been completely loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );
        }
    }
    else                          // page available in mapper
    {
        rwlock_rd_unlock( &mapper->lock );
    }

    mapper_dmsg("\n[INFO] %s : exit for page %d in mapper %x / page_desc = %x\n",
                __FUNCTION__ , index , mapper , page );

    return page;

}  // end mapper_get_page()

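/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only): direct access to the content of one mapper page,
// as a file-system driver could do it. The <mapper> and <index> variables are
// placeholders; ppm_page2vaddr() is the same helper used by mapper_move_buffer().
//
//     page_t  * page = mapper_get_page( mapper , index );
//     if( page == NULL ) return EIO;
//
//     uint8_t * base = (uint8_t *)ppm_page2vaddr( page );
//
//     // <base> now points to bytes [index * CONFIG_PPM_PAGE_SIZE] up to
//     // [(index + 1) * CONFIG_PPM_PAGE_SIZE - 1] of the file or device
/////////////////////////////////////////////////////////////////////////////////////
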
///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update the file system
    error = vfs_mapper_move_page( page , false );    // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_lock( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_unlock( &mapper->lock );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

////////////////////////////////////////////////
error_t mapper_move_buffer( mapper_t  *  mapper,
                            bool_t       to_buffer,
                            bool_t       is_user,
                            uint32_t     file_offset,
                            void      *  buffer,
                            uint32_t     size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor
    uint8_t  * map_ptr;        // current mapper address
    uint8_t  * buf_ptr;        // current buffer address

    mapper_dmsg("\n[INFO] %s : enters / to_buf = %d / buffer = %x\n",
                __FUNCTION__ , to_buffer , buffer );

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute page_count
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;
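
        // Worked example (assuming 4 Kbytes pages, i.e. CONFIG_PPM_PAGE_SIZE = 4096):
        // for file_offset = 5000 and size = 6000, we get min_byte = 5000 and
        // max_byte = 10999, hence first = 1 and last = 2, and two fragments are moved:
        //  - index == first (1) : page_offset = 904 / page_count = 4096 - 904 = 3192
        //  - index == last  (2) : page_offset = 0   / page_count = 2807 + 1    = 2808
        // i.e. 3192 + 2808 = 6000 bytes in total, as requested.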

        mapper_dmsg("\n[INFO] %s : index = %d / offset = %d / count = %d\n",
                    __FUNCTION__ , index , page_offset , page_count );

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        map_ptr = (uint8_t *)ppm_page2vaddr( page ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        mapper_dmsg("\n[INFO] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",
                    __FUNCTION__ , index , buf_ptr , map_ptr );

        // move fragment
        if( to_buffer )
        {
            if( is_user ) hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
            else          memcpy( buf_ptr , map_ptr , page_count );
        }
        else
        {
            page_do_dirty( page );
            if( is_user ) hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
            else          memcpy( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

    mapper_dmsg("\n[INFO] %s : exit for buffer %x\n",
                __FUNCTION__, buffer );

    return 0;

}  // end mapper_move_buffer()

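/////////////////////////////////////////////////////////////////////////////////////
// Usage sketch (illustrative only): moving file data to a user buffer, as a read()
// implementation could do it. The <mapper>, <user_buf>, <offset> and <count>
// variables are placeholders for the caller's context.
//
//     error_t error = mapper_move_buffer( mapper,
//                                         true,       // to_buffer : mapper -> buffer
//                                         true,       // is_user   : user address space
//                                         offset,     // offset of first byte in file
//                                         user_buf,   // destination buffer
//                                         count );    // number of bytes to move
//     if( error ) return error;
//
// For a write(), <to_buffer> is false, and each modified page is marked dirty by
// page_do_dirty() so that it can later be flushed back to the device.
/////////////////////////////////////////////////////////////////////////////////////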