source: trunk/kernel/mm/mapper.c @ 421

/*
 * mapper.c - Map memory, file or device in process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016)
 *
 * Copyright (c)  UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

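// The mapper implements the page cache for one file, directory, or device:
// physical pages are registered in a radix tree indexed by the page index in
// the file, and the mapper rwlock protects this tree against concurrent access.
//
// Illustrative sketch (not part of this file): how a VFS read path could use the
// mapper to move data to a user buffer, assuming <file> is an open vfs_file_t
// and <u_buf> is a user-space buffer (field and variable names are hypothetical):
//
//     mapper_t * mapper = file->mapper;
//     error_t    error  = mapper_move_user( mapper,
//                                           true,          // mapper -> user buffer
//                                           file->offset,  // current file offset
//                                           u_buf,         // user buffer base
//                                           count );       // number of bytes
//     if( error ) return error;
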
//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type  = KMEM_MAPPER;
        req.ptr   = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

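// Illustrative sketch (not part of this file): a mapper is typically created when
// an inode is created, and destroyed when the inode is released. Assuming an
// inode allocation path with a <type> file-system type (caller names are
// hypothetical):
//
//     mapper_t * mapper = mapper_create( type );
//     if( mapper == NULL ) return ENOMEM;
//     inode->mapper = mapper;
//     mapper->inode = inode;
//     ...
//     mapper_destroy( mapper );   // when the inode is destroyed
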
///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;
    error_t    error;

    // scan the radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to the radix tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t    req;
    page_t      * page;
    error_t       error;

mapper_dmsg("\n[DBG] %s : core[%x,%d] enters for page %d / mapper %x\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , index , mapper );

    thread_t * this = CURRENT_THREAD;

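    // Synchronization note (descriptive comment): the lookup is first done under
    // the READ_MODE lock; on a miss, the lock is upgraded to WRITE_MODE and the
    // lookup is repeated, because another thread may have inserted the page
    // between the unlock and the re-lock. The PG_INLOAD flag marks a page whose
    // load from the file system is still in progress, so concurrent readers wait
    // on this flag instead of issuing a second load.
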
    // take mapper lock in READ_MODE
    rwlock_rd_lock( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page available in mapper
    if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
    {

        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_unlock( &mapper->lock );
        rwlock_wr_lock( &mapper->lock );

        // second lookup of the missing page, because the page status can have been
        // modified by another thread while passing from READ_MODE to WRITE_MODE.
        // From this point there are no concurrent accesses to the mapper.

        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {

mapper_dmsg("\n[DBG] %s : core[%x,%d] missing page => load from device\n",
__FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid );

            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                       __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_unlock( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT | PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix, index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                       __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free(&req);
                return NULL;
            }

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page,
                                          true );   // to mapper
            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                       __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );

mapper_dmsg("\n[DBG] %s : missing page loaded / ppn = %x\n",
__FUNCTION__ , ppm_page2ppn(XPTR(local_cxy,page)) );

        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );

            // wait load completion
            while( 1 )
            {
                // exit waiting loop when loaded
                if( page_is_flag( page , PG_INLOAD ) == false ) break;

                // deschedule
                sched_yield("waiting page loading");
            }
        }
        else   // page was registered by another thread during the lock upgrade
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );
        }
    }
    else                          // page available in mapper
    {
        rwlock_rd_unlock( &mapper->lock );
    }

mapper_dmsg("\n[DBG] %s : exit for page %d / mapper %x / page_desc = %x\n",
__FUNCTION__ , index , mapper , page );

    return page;

}  // end mapper_get_page()

///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update page in file system
    error = vfs_mapper_move_page( page , false );    // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_lock( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_unlock( &mapper->lock );

    // release page to PPM
    kmem_req_t   req;
    req.type  = KMEM_PAGE;
    req.ptr   = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

///////////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor
    uint8_t  * map_ptr;        // current mapper address
    uint8_t  * buf_ptr;        // current buffer address

    mapper_dmsg("\n[DBG] %s : enters / to_buf = %d / buffer = %x\n",
                __FUNCTION__ , to_buffer , buffer );

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

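    // Worked example (descriptive comment), assuming 4 Kbytes pages
    // (CONFIG_PPM_PAGE_SHIFT = 12 and CONFIG_PPM_PAGE_MASK = 0xFFF):
    // for file_offset = 0x1F00 and size = 0x300, we get min_byte = 0x1F00,
    // max_byte = 0x21FF, first = 1, last = 2. The loop below then moves two
    // fragments: 0x100 bytes from offset 0xF00 in page 1, and 0x200 bytes from
    // offset 0 in page 2, i.e. 0x300 bytes in total.
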
    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

        mapper_dmsg("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
                    __FUNCTION__ , index , page_offset , page_count );

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy, page ) );
        map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        mapper_dmsg("\n[DBG] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",
                    __FUNCTION__ , index , buf_ptr , map_ptr );

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            page_do_dirty( page );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

    mapper_dmsg("\n[DBG] %s : exit for buffer %x\n",
                __FUNCTION__, buffer );

    return 0;

}  // end mapper_move_user()

////////////////////////////////////////////////
error_t mapper_move_kernel( mapper_t  *  mapper,
                            bool_t       to_buffer,
                            uint32_t     file_offset,
                            xptr_t       buffer_xp,
                            uint32_t     size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

mapper_dmsg("\n[DBG] %s : core[%x,%d] / to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, to_buffer, buffer_cxy, buffer_ptr, size );

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

mapper_dmsg("\n[DBG] %s : core[%x,%d] / first_page = %d / last_page = %d\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, first, last );

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = local_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = local_cxy;
    }

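    // Descriptive comment: the kernel buffer is identified by an extended pointer
    // and can therefore be located in any cluster, while the mapper pages are
    // local to this cluster. The hal_remote_memcpy() call below takes two
    // extended pointers built from the cluster identifiers computed above, and
    // moves each fragment across clusters when required.
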
    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

mapper_dmsg("\n[DBG] %s : core[%x,%d] / page_index = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , local_cxy, CURRENT_THREAD->core->lid, index, page_offset, page_count );

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            page_do_dirty( page );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

mapper_dmsg("\n[DBG] %s : core[%x,%d] / exit / buf_cxy = %x / buf_ptr = %x / size = %x\n",
__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, buffer_cxy, buffer_ptr, size );

    return 0;

}  // end mapper_move_kernel()

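// Illustrative sketch (not part of this file): a file-system driver running in
// the mapper's cluster could use mapper_move_kernel() to copy <count> bytes from
// the mapper into a kernel buffer identified by an extended pointer (the names
// below are hypothetical):
//
//     xptr_t  buf_xp = XPTR( buf_cxy , buf_ptr );
//     error_t error  = mapper_move_kernel( mapper,
//                                          true,          // mapper -> kernel buffer
//                                          file_offset,
//                                          buf_xp,
//                                          count );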