/*
 * mapper.c - Map memory, file or device in process virtual address space.
 *
 * Authors   Mohamed Lamine Karaoui (2015)
 *           Alain Greiner (2016,2017,2018)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <kernel_config.h>
#include <hal_kernel_types.h>
#include <hal_special.h>
#include <hal_uspace.h>
#include <grdxt.h>
#include <rwlock.h>
#include <printk.h>
#include <memcpy.h>
#include <thread.h>
#include <core.h>
#include <process.h>
#include <kmem.h>
#include <kcm.h>
#include <ppm.h>
#include <page.h>
#include <cluster.h>
#include <vfs.h>
#include <mapper.h>

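// Implementation overview (informal, based only on the code in this file):
// a mapper caches in physical memory the pages of a given file or device.
// Pages are registered in the embedded 3-level radix tree <radix>, indexed by
// the page index in the file, and protected by the <lock> read/write lock:
// lookups take the lock in READ_MODE, and a miss escalates to WRITE_MODE to
// allocate the missing page and load it from the file system (see the
// mapper_get_page() function below).
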
//////////////////////////////////////////////
mapper_t * mapper_create( vfs_fs_type_t type )
{
    mapper_t * mapper;
    kmem_req_t req;
    error_t    error;

    // allocate memory for associated mapper
    req.type  = KMEM_MAPPER;
    req.size  = sizeof(mapper_t);
    req.flags = AF_KERNEL | AF_ZERO;
    mapper    = (mapper_t *)kmem_alloc( &req );

    if( mapper == NULL )
    {
        printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
        return NULL;
    }

    // initialize refcount & inode
    mapper->refcount = 0;
    mapper->inode    = NULL;

    // initialize radix tree
    error = grdxt_init( &mapper->radix,
                        CONFIG_VMM_GRDXT_W1,
                        CONFIG_VMM_GRDXT_W2,
                        CONFIG_VMM_GRDXT_W3 );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
        req.type = KMEM_MAPPER;
        req.ptr  = mapper;
        kmem_free( &req );
        return NULL;
    }

    // initialize mapper type
    mapper->type = type;

    // initialize mapper lock
    rwlock_init( &mapper->lock , LOCK_MAPPER_STATE );

    // initialize waiting threads xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );

    // initialize vsegs xlist (empty)
    xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );

    return mapper;

}  // end mapper_create()
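
// Usage sketch (informal): a file-system inode-creation path would typically
// pair mapper_create() and mapper_destroy() around the inode lifetime, and link
// the two descriptors. The <fs_type>, <inode> and ENOMEM names below are
// placeholders used for illustration, not references to an actual call site:
//
//     mapper_t * mapper = mapper_create( fs_type );
//     if( mapper == NULL ) return ENOMEM;
//     mapper->inode = inode;                       // back-pointer to the owner inode
//     ...                                          // pages are faulted in on demand
//     error_t error = mapper_destroy( mapper );    // flushes and releases all pages
//     if( error ) return error;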

///////////////////////////////////////////
error_t mapper_destroy( mapper_t * mapper )
{
    page_t   * page;
    uint32_t   found_index = 0;
    uint32_t   start_index = 0;
    kmem_req_t req;
    error_t    error;

    // scan radix tree and release all registered pages to PPM
    do
    {
        // get page from radix tree
        page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );

        if( page != NULL )
        {
            // remove page from mapper and release to PPM
            error = mapper_release_page( mapper , page );

            if ( error ) return error;

            // update start_index value for next page
            start_index = found_index;
        }
    }
    while( page != NULL );

    // release the memory allocated to the radix tree itself
    grdxt_destroy( &mapper->radix );

    // release memory for mapper descriptor
    req.type = KMEM_MAPPER;
    req.ptr  = mapper;
    kmem_free( &req );

    return 0;

}  // end mapper_destroy()

////////////////////////////////////////////
page_t * mapper_get_page( mapper_t * mapper,
                          uint32_t   index )
{
    kmem_req_t req;
    page_t   * page;
    error_t    error;

#if DEBUG_MAPPER_GET_PAGE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
#endif

    thread_t * this = CURRENT_THREAD;

    // check thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // take mapper lock in READ_MODE
    rwlock_rd_acquire( &mapper->lock );

    // search page in radix tree
    page = (page_t *)grdxt_lookup( &mapper->radix , index );

    // test if page is available in mapper
    if( (page == NULL) || page_is_flag( page , PG_INLOAD ) )   // page not available
    {
        // release the lock in READ_MODE and take it in WRITE_MODE
        rwlock_rd_release( &mapper->lock );
        rwlock_wr_acquire( &mapper->lock );

        // test again for a missing page, because the page status may have been
        // modified by another thread while the lock was released between
        // READ_MODE and WRITE_MODE. From this point there is no concurrent
        // access to the mapper.

        page = grdxt_lookup( &mapper->radix , index );

        if ( page == NULL )   // missing page => create it and load it from file system
        {

#if (DEBUG_MAPPER_GET_PAGE & 1)
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
#endif
            // allocate one page from PPM
            req.type  = KMEM_PAGE;
            req.size  = 0;
            req.flags = AF_NONE;
            page      = kmem_alloc( &req );

            if( page == NULL )
            {
                printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
                __FUNCTION__ , this->trdid , local_cxy );
                rwlock_wr_release( &mapper->lock );
                return NULL;
            }

            // initialize the page descriptor
            page_init( page );
            page_set_flag( page , PG_INIT | PG_INLOAD );
            page_refcount_up( page );
            page->mapper = mapper;
            page->index  = index;

            // insert page in mapper radix tree
            error = grdxt_insert( &mapper->radix , index , page );

            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
                __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // launch I/O operation to load page from file system
            error = vfs_mapper_move_page( page,
                                          true );   // to mapper
            if( error )
            {
                printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
                __FUNCTION__ , this->trdid );
                mapper_release_page( mapper , page );
                page_clear_flag( page , PG_ALL );
                req.ptr  = page;
                req.type = KMEM_PAGE;
                kmem_free( &req );
                return NULL;
            }

            // reset the page INLOAD flag to make the page available to all readers
            page_clear_flag( page , PG_INLOAD );
        }
        else if( page_is_flag( page , PG_INLOAD ) )   // page is being loaded by another thread
        {
            // release mapper lock from WRITE_MODE
            rwlock_wr_release( &mapper->lock );

            // wait load completion
            while( page_is_flag( page , PG_INLOAD ) )
            {
                // deschedule without blocking
                sched_yield("waiting page loading");
            }
        }
    }
    else   // page available in mapper
    {
        rwlock_rd_release( &mapper->lock );
    }

#if DEBUG_MAPPER_GET_PAGE
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_GET_PAGE < cycle )
printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
__FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
#endif

    return page;

}  // end mapper_get_page()
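
// Usage sketch (informal): callers that need to access the cached data convert
// the returned page descriptor to a base address with ppm_page2base(), as done
// by mapper_move_user() and mapper_move_kernel() below. The <offset> value is a
// placeholder:
//
//     page_t * page = mapper_get_page( mapper , index );
//     if( page == NULL ) return EINVAL;
//     xptr_t    base_xp = ppm_page2base( XPTR( local_cxy , page ) );
//     uint8_t * ptr     = (uint8_t *)GET_PTR( base_xp ) + offset;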

///////////////////////////////////////////////
error_t mapper_release_page( mapper_t * mapper,
                             page_t   * page )
{
    error_t error;

    // launch I/O operation to update page on file system
    error = vfs_mapper_move_page( page , false );   // from mapper

    if( error )
    {
        printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
        return EIO;
    }

    // take mapper lock in WRITE_MODE
    rwlock_wr_acquire( &mapper->lock );

    // remove physical page from radix tree
    grdxt_remove( &mapper->radix , page->index );

    // release mapper lock from WRITE_MODE
    rwlock_wr_release( &mapper->lock );

    // release page to PPM
    kmem_req_t req;
    req.type = KMEM_PAGE;
    req.ptr  = page;
    kmem_free( &req );

    return 0;

}  // end mapper_release_page()

///////////////////////////////////////////////////
error_t mapper_move_user( mapper_t * mapper,
                          bool_t     to_buffer,
                          uint32_t   file_offset,
                          void     * buffer,
                          uint32_t   size )
{
    uint32_t   page_offset;   // first byte to move to/from a mapper page
    uint32_t   page_count;    // number of bytes to move to/from a mapper page
    uint32_t   index;         // current mapper page index
    uint32_t   done;          // number of moved bytes
    page_t   * page;          // current mapper page descriptor
    uint8_t  * map_ptr;       // current mapper address
    uint8_t  * buf_ptr;       // current buffer address

#if DEBUG_MAPPER_MOVE_USER
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes of pages for first and last byte in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;
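
        // Worked example (informal, assuming 4 KB pages, i.e. CONFIG_PPM_PAGE_SIZE
        // = 4096): for file_offset = 1000 and size = 5000, min_byte = 1000 and
        // max_byte = 5999, hence first = 0 and last = 1. The first page moves
        // bytes [1000..4095] (page_offset = 1000, page_count = 3096) and the last
        // page moves bytes [4096..5999] (page_offset = 0, page_count = 1904),
        // i.e. 3096 + 1904 = 5000 bytes in total.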

#if (DEBUG_MAPPER_MOVE_USER & 1)
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // compute pointer in mapper
        xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) );
        map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

        // compute pointer in buffer
        buf_ptr = (uint8_t *)buffer + done;

        // move fragment
        if( to_buffer )
        {
            hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
        }
        else
        {
            ppm_page_do_dirty( page );
            hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
        }

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_USER
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_USER < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
#endif

    return 0;

}  // end mapper_move_user()
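
// Usage sketch (informal): a read()-like system call path would typically call
// mapper_move_user() with to_buffer = true to copy file data from the mapper to
// a user buffer. The <mapper>, <offset>, <user_buf> and <count> names below are
// placeholders, not references to an actual call site:
//
//     error_t error = mapper_move_user( mapper,
//                                       true,        // mapper -> user buffer
//                                       offset,      // first byte in file
//                                       user_buf,    // user-space pointer
//                                       count );     // number of bytes
//     if( error ) return error;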

////////////////////////////////////////////////
error_t mapper_move_kernel( mapper_t * mapper,
                            bool_t     to_buffer,
                            uint32_t   file_offset,
                            xptr_t     buffer_xp,
                            uint32_t   size )
{
    uint32_t   page_offset;   // first byte to move to/from a mapper page
    uint32_t   page_count;    // number of bytes to move to/from a mapper page
    uint32_t   index;         // current mapper page index
    uint32_t   done;          // number of moved bytes
    page_t   * page;          // current mapper page descriptor

    uint8_t  * src_ptr;       // source buffer local pointer
    cxy_t      src_cxy;       // source cluster
    uint8_t  * dst_ptr;       // destination buffer local pointer
    cxy_t      dst_cxy;       // destination cluster

    // get buffer cluster and local pointer
    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );

#if DEBUG_MAPPER_MOVE_KERNEL
uint32_t cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    // compute offsets of first and last bytes in file
    uint32_t min_byte = file_offset;
    uint32_t max_byte = file_offset + size - 1;

    // compute indexes for first and last pages in mapper
    uint32_t first = min_byte >> CONFIG_PPM_PAGE_SHIFT;
    uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
#endif

    // compute source and destination clusters
    if( to_buffer )
    {
        dst_cxy = buffer_cxy;
        src_cxy = local_cxy;
    }
    else
    {
        src_cxy = buffer_cxy;
        dst_cxy = local_cxy;
    }

    done = 0;

    // loop on pages in mapper
    for( index = first ; index <= last ; index++ )
    {
        // compute page_offset
        if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
        else                 page_offset = 0;

        // compute number of bytes to move in page
        if      ( first == last  ) page_count = size;
        else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
        else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
        else                       page_count = CONFIG_PPM_PAGE_SIZE;

#if (DEBUG_MAPPER_MOVE_KERNEL & 1)
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
__FUNCTION__ , index , page_offset , page_count );
#endif

        // get page descriptor
        page = mapper_get_page( mapper , index );

        if ( page == NULL ) return EINVAL;

        // get page base address
        xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
        uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );

        // compute source and destination pointers
        if( to_buffer )
        {
            dst_ptr = buffer_ptr + done;
            src_ptr = base_ptr + page_offset;
        }
        else
        {
            src_ptr = buffer_ptr + done;
            dst_ptr = base_ptr + page_offset;

            ppm_page_do_dirty( page );
        }

        // move fragment
        hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );

        done += page_count;
    }

#if DEBUG_MAPPER_MOVE_KERNEL
cycle = (uint32_t)hal_get_cycles();
if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
__FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
#endif

    return 0;

}  // end mapper_move_kernel()

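// Usage sketch (informal): kernel callers pass the buffer as an extended pointer,
// so the buffer may reside in a remote cluster. The <mapper>, <offset>, and <kbuf>
// names below are placeholders, not references to an actual call site:
//
//     uint8_t kbuf[256];
//     error_t error = mapper_move_kernel( mapper,
//                                         true,                       // mapper -> buffer
//                                         offset,                     // first byte in file
//                                         XPTR( local_cxy , kbuf ),   // extended pointer on buffer
//                                         sizeof(kbuf) );             // number of bytes
//     if( error ) return error;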