/*
 * mapper.c - Map memory, file or device in a process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type = KMEM_MAPPER;
        req.ptr  = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

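///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original module) : typical creation/destruction
// of a mapper by a VFS inode allocator. The fs_type value, the ENOMEM return and the
// inode back-link shown below are assumptions for illustration only.
//
//     mapper_t * mapper = mapper_create( fs_type );   // fs_type is a vfs_fs_type_t
//     if( mapper == NULL ) return ENOMEM;             // allocation or grdxt_init failed
//     mapper->inode = inode;                          // assumed back-link to owner inode
//     ...
//     mapper_destroy( mapper );                       // write pages back to the FS, then
//                                                     // release all pages and the descriptor
///////////////////////////////////////////////////////////////////////////////////////
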
///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;
    error_t    error;

    // scan the radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release it to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to the radix tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t req;
    page_t   * page;
    error_t    error;

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
#endif

    thread_t * this = CURRENT_THREAD;

    // take mapper lock in READ_MODE
    rwlock_rd_lock( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page is available in mapper
    if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )   // page not available
    {

        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_unlock( &mapper->lock );
        rwlock_wr_lock( &mapper->lock );

        // second test on missing page, because the page status may have been modified
        // by another thread while passing from READ_MODE to WRITE_MODE.
        // From this point there are no concurrent accesses to the mapper.

        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
#endif
            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page      = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_unlock( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT | PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix, index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page,
                                          true );   // to mapper
            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );
        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is being loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_unlock( &mapper->lock );

            // wait for load completion
            while( 1 )
            {
                // exit the waiting loop when the page has been loaded
                if( page_is_flag( page , PG_INLOAD ) == false ) break;

                // deschedule
                sched_yield("waiting page loading");
            }
        }
    }
    else                                              // page available in mapper
    {
        rwlock_rd_unlock( &mapper->lock );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
#endif

    return page;

}  // end mapper_get_page()

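///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original module) : direct access to one 32-bit
// word at a word-aligned byte offset <offset> in the file cached by <mapper>, using
// mapper_get_page() and the PPM translation functions already used in this file.
// The <offset> and <value> names are assumptions for illustration only.
//
//     uint32_t   index   = offset >> CONFIG_PPM_PAGE_SHIFT;            // page index
//     page_t   * page    = mapper_get_page( mapper , index );          // get/load page
//     if( page == NULL ) return EIO;
//     xptr_t     base_xp = ppm_page2base( XPTR( local_cxy , page ) );  // page base
//     uint8_t  * ptr     = (uint8_t *)GET_PTR( base_xp ) + (offset & CONFIG_PPM_PAGE_MASK);
//     uint32_t   value   = *(uint32_t *)ptr;                           // read the word
///////////////////////////////////////////////////////////////////////////////////////
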
///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update page in file system
    error = vfs_mapper_move_page( page , false );   // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_lock( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_unlock( &mapper->lock );

    // release page to PPM
    kmem_req_t req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

///////////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor
    uint8_t  * map_ptr;        // current mapper address
    uint8_t  * buf_ptr;        // current buffer address

#if DEBUG_MAPPER_MOVE_USER
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy, page ) );
        map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            page_do_dirty( page );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    return 0;

}  // end mapper_move_user()

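///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original module) : a read()-like path copying
// <size> bytes from the file cache to a user buffer <u_buf>, starting at file offset
// <offset>. The variable names are assumptions for illustration only.
//
//     error = mapper_move_user( mapper,
//                               true,        // to user buffer
//                               offset,      // first byte in file
//                               u_buf,       // user-space pointer
//                               size );
//
// Worked fragmentation example, assuming a 4096-byte page (CONFIG_PPM_PAGE_SIZE) :
// with offset = 1000 and size = 8000, min_byte = 1000 and max_byte = 8999, so the
// loop covers pages 0, 1, 2 and moves 3096, 4096 and 808 bytes respectively
// (3096 + 4096 + 808 = 8000).
///////////////////////////////////////////////////////////////////////////////////////
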
////////////////////////////////////////////////
error_t mapper_move_kernel( mapper_t * mapper,
                            bool_t     to_buffer,
                            uint32_t   file_offset,
                            xptr_t     buffer_xp,
                            uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = local_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = local_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            page_do_dirty( page );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    return 0;

}  // end mapper_move_kernel()

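///////////////////////////////////////////////////////////////////////////////////////
// Illustrative sketch (not part of the original module) : writing <size> bytes from a
// kernel buffer, possibly located in a remote cluster, into the file cache at file
// offset <offset>. The buffer name and cluster are assumptions for illustration only.
//
//     xptr_t buf_xp = XPTR( buf_cxy , buf_ptr );   // extended pointer on kernel buffer
//     error = mapper_move_kernel( mapper,
//                                 false,           // from buffer to mapper
//                                 offset,          // first byte in file
//                                 buf_xp,          // kernel buffer
//                                 size );
///////////////////////////////////////////////////////////////////////////////////////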