/*
 * mapper.c - Map memory, file or device in process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

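///////////////////////////////////////////////////////////////////////////////////////
// Overview: the mapper implements the kernel page cache associated to a file or
// device. Physical pages are registered in a three-level radix tree, indexed by the
// page index in the file. Pages are allocated and loaded from the file system on
// demand by the mapper_get_page() function, and the mapper state is protected by a
// read/write lock allowing concurrent read accesses.
///////////////////////////////////////////////////////////////////////////////////////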
#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

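///////////////////////////////////////////////////////////////////////////////////////
// This function allocates and initializes a mapper descriptor in the local cluster:
// it zeroes the descriptor, initializes the radix tree, the mapper type, the rwlock,
// and the (empty) waiting-threads and vsegs xlists. It returns a pointer on the new
// mapper, or NULL if memory allocation or radix tree initialization fails.
///////////////////////////////////////////////////////////////////////////////////////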
//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t   * mapper;
    kmem_req_t   req;
    error_t      error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type = KMEM_MAPPER;
        req.ptr  = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()

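///////////////////////////////////////////////////////////////////////////////////////
// This function releases all physical pages registered in the mapper radix tree
// (each page being first copied back to the file system by mapper_release_page()),
// then destroys the radix tree itself, and finally releases the mapper descriptor.
// It returns 0 on success, or an error code if a page cannot be released.
///////////////////////////////////////////////////////////////////////////////////////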
///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t     * page;
    uint32_t     found_index = 0;
    uint32_t     start_index = 0;
    kmem_req_t   req;
    error_t      error;

    // scan radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release it to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to the radix tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

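///////////////////////////////////////////////////////////////////////////////////////
// This function returns a pointer on the page descriptor identified by its index in
// the file. It takes the mapper lock in READ_MODE to search the radix tree. If the
// page is missing, it upgrades the lock to WRITE_MODE, allocates a new page from the
// PPM, registers it in the radix tree, and loads it from the file system. If the page
// is currently being loaded by another thread (PG_INLOAD set), it polls until the
// load completes. It returns NULL in case of allocation, insertion, or I/O error.
///////////////////////////////////////////////////////////////////////////////////////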
////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t   req;
    page_t     * page;
    error_t      error;

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
#endif

    thread_t * this = CURRENT_THREAD;

    // take mapper lock in READ_MODE
    rwlock_rd_acquire( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page available in mapper
    if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )   // page not available
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_release( &mapper->lock );
        rwlock_wr_acquire( &mapper->lock );

        // second lookup for the missing page, because the page status can have been
        // modified by another thread while passing from READ_MODE to WRITE_MODE.
        // From this point, there are no concurrent accesses to the mapper.
        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
#endif
            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page      = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                       __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_release( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT | PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix, index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                       __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page,
                                          true );    // to mapper
            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                       __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );
        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            // wait load completion
            while( page_is_flag( page , PG_INLOAD ) )
            {
                // deschedule without blocking
                sched_yield("waiting page loading");
            }
        }
    }
    else   // page available in mapper
    {
        rwlock_rd_release( &mapper->lock );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
#endif

    return page;

}  // end mapper_get_page()

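///////////////////////////////////////////////////////////////////////////////////////
// This function removes a given page from the mapper: it first copies the page
// content back to the file system, then removes the page from the radix tree
// (taking the mapper lock in WRITE_MODE), and finally releases the physical page
// to the PPM. It returns 0 on success, or EIO if the file system update fails.
///////////////////////////////////////////////////////////////////////////////////////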
///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update the page in the file system
    error = vfs_mapper_move_page( page , false );    // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_acquire( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_release( &mapper->lock );

    // release page to PPM
    kmem_req_t req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

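///////////////////////////////////////////////////////////////////////////////////////
// This function moves <size> bytes between the mapper and a user buffer, in the
// direction defined by the <to_buffer> argument. The transfer is split into
// page-sized fragments: for each page index covered by the requested interval, the
// page is obtained from mapper_get_page(), and the fragment is copied with
// hal_copy_to_uspace() / hal_copy_from_uspace(). A page written from the user buffer
// is marked dirty. It returns 0 on success, or EINVAL if a page cannot be obtained.
///////////////////////////////////////////////////////////////////////////////////////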
///////////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor
    uint8_t  * map_ptr;        // current mapper address
    uint8_t  * buf_ptr;        // current buffer address

#if DEBUG_MAPPER_MOVE_USER
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy, page ) );
        map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    return 0;

}  // end mapper_move_user()

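///////////////////////////////////////////////////////////////////////////////////////
// This function moves <size> bytes between the local mapper and a kernel buffer,
// which can be located in any cluster and is identified by the <buffer_xp> extended
// pointer. As in mapper_move_user(), the transfer is split into page-sized fragments,
// each copied with hal_remote_memcpy(), and a page written from the buffer is marked
// dirty. It returns 0 on success, or EINVAL if a page cannot be obtained from the
// mapper.
///////////////////////////////////////////////////////////////////////////////////////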
////////////////////////////////////////////////
error_t mapper_move_kernel( mapper_t * mapper,
                            bool_t     to_buffer,
                            uint32_t   file_offset,
                            xptr_t     buffer_xp,
                            uint32_t   size )
{
    uint32_t   page_offset;    // first byte to move to/from a mapper page
    uint32_t   page_count;     // number of bytes to move to/from a mapper page
    uint32_t   index;          // current mapper page index
    uint32_t   done;           // number of moved bytes
    page_t   * page;           // current mapper page descriptor

    uint8_t  * src_ptr;        // source buffer local pointer
    cxy_t      src_cxy;        // source cluster
    uint8_t  * dst_ptr;        // destination buffer local pointer
    cxy_t      dst_cxy;        // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = local_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = local_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    return 0;

}  // end mapper_move_kernel()
