/*
 * mapper.c - Map memory, file or device in process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type = KMEM_MAPPER;
        req.ptr  = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()
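
// Illustrative (non-normative) usage sketch: a mapper is typically created when a file
// or directory inode is built, attached to that inode, and destroyed when the inode is
// released. The FS_TYPE_FATFS value and the inode->mapper field used below are
// assumptions made for this sketch only; they are not defined in this file.
//
//   mapper_t * mapper = mapper_create( FS_TYPE_FATFS );
//   if( mapper == NULL ) { /* handle allocation failure */ }
//   mapper->inode = inode;     // assumed back-link to the owner inode
//   inode->mapper = mapper;    // assumed field in the inode descriptor
//   ...
//   mapper_destroy( mapper );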

///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;
    error_t    error;

    // scan radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to the radix tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t req;
    page_t   * page;
    error_t    error;

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
#endif

    thread_t * this = CURRENT_THREAD;

    // take mapper lock in READ_MODE
    rwlock_rd_acquire( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page available in mapper
    if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
    {

        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_release( &mapper->lock );
        rwlock_wr_acquire( &mapper->lock );

        // second test on missing page, because the page status can have been modified
        // by another thread while passing from READ_MODE to WRITE_MODE.
        // From this point, there are no concurrent accesses to the mapper.

        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
#endif
            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page      = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                       __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_release( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT | PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix, index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                       __FUNCTION__ , this->trdid );

                // the page was not registered in the mapper radix tree :
                // simply release it to PPM
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page,
                                          true );   // to mapper
            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                       __FUNCTION__ , this->trdid );

                // remove the not-loaded page from the mapper radix tree, then release
                // it to PPM (mapper_release_page() is not used here, because it would
                // also try to copy the page back to the file system)
                rwlock_wr_acquire( &mapper->lock );
                grdxt_remove( &mapper->radix , index );
                rwlock_wr_release( &mapper->lock );

                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );
        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            // wait load completion
            while( page_is_flag( page , PG_INLOAD ) )
            {
                // deschedule without blocking
                sched_yield("waiting page loading");
            }
        }
    }
    else   // page available in mapper
    {
        rwlock_rd_release( &mapper->lock );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
#endif

    return page;

}  // end mapper_get_page()
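
// Illustrative (non-normative) usage of mapper_get_page() : get the page containing
// byte "offset" of the file, then compute its base address before accessing its
// content, as done by mapper_move_user() and mapper_move_kernel() below.
// The "offset" name is an illustrative local variable.
//
//   page_t  * page    = mapper_get_page( mapper , offset >> CONFIG_PPM_PAGE_SHIFT );
//   if( page == NULL ) return EIO;
//   xptr_t    base_xp = ppm_page2base( XPTR( local_cxy , page ) );
//   uint8_t * base    = (uint8_t *)GET_PTR( base_xp );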

///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update the page content on the file system
    error = vfs_mapper_move_page( page , false );   // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_acquire( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_release( &mapper->lock );

    // release page to PPM
    kmem_req_t req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

///////////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;   // first byte to move to/from a mapper page
    uint32_t   page_count;    // number of bytes to move to/from a mapper page
    uint32_t   index;         // current mapper page index
    uint32_t   done;          // number of moved bytes
    page_t   * page;          // current mapper page descriptor
    uint8_t  * map_ptr;       // current mapper address
    uint8_t  * buf_ptr;       // current buffer address

#if DEBUG_MAPPER_MOVE_USER
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;
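
        // Worked example (illustrative, assuming 4 Kbytes pages, i.e.
        // CONFIG_PPM_PAGE_SIZE = 4096) : for file_offset = 6000 and size = 5000,
        // min_byte = 6000, max_byte = 10999, first = 1, last = 2 :
        // => index 1 : page_offset = 1904 / page_count = 2192
        //    index 2 : page_offset = 0    / page_count = 2808
        // and 2192 + 2808 = 5000 bytes are moved in total.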

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy, page ) );
        map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    return 0;

}  // end mapper_move_user()
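
// Illustrative (non-normative) call of mapper_move_user(), as a read() implementation
// might issue it : copy "count" bytes, starting at the current file offset, from the
// mapper to a user buffer. The "file_offset", "user_buf" and "count" names are
// illustrative local variables, not taken from the VFS code.
//
//   error = mapper_move_user( mapper,
//                             true,           // to user buffer
//                             file_offset,
//                             user_buf,
//                             count );
//   if( error ) { /* handle I/O error */ }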

////////////////////////////////////////////////
error_t mapper_move_kernel( mapper_t * mapper,
                            bool_t     to_buffer,
                            uint32_t   file_offset,
                            xptr_t     buffer_xp,
                            uint32_t   size )
{
    uint32_t   page_offset;   // first byte to move to/from a mapper page
    uint32_t   page_count;    // number of bytes to move to/from a mapper page
    uint32_t   index;         // current mapper page index
    uint32_t   done;          // number of moved bytes
    page_t   * page;          // current mapper page descriptor

    uint8_t  * src_ptr;       // source buffer local pointer
    cxy_t      src_cxy;       // source cluster
    uint8_t  * dst_ptr;       // destination buffer local pointer
    cxy_t      dst_cxy;       // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = local_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = local_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
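
// Illustrative (non-normative) call of mapper_move_kernel() : copy the 64 bytes located
// at offset 256 in the file into a kernel buffer allocated in the local cluster.
// The "buf" variable is an illustrative local buffer, not taken from the kernel code.
//
//   uint8_t buf[64];
//   error_t error = mapper_move_kernel( mapper,
//                                       true,                       // to buffer
//                                       256,                        // file offset
//                                       XPTR( local_cxy , buf ),    // buffer extended pointer
//                                       64 );                       // number of bytes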