Changeset 606 for trunk/kernel/mm

Timestamp: Dec 3, 2018, 12:20:18 PM (6 years ago)
Location:  trunk/kernel/mm
Files:     6 edited
Legend: unchanged context lines are shown indented, removed lines are prefixed
with '-', added lines with '+', and '...' marks elided unchanged code.
trunk/kernel/mm/mapper.c (r581 -> r606)

   /*
-  * mapper.c - Map memory, file or device in process virtual address space.
+  * mapper.c - Kernel cache for FS files or directories implementation.
    *
    * Authors   Mohamed Lamine Karaoui (2015)
...
       error_t    error;

-      // allocate memory for associated mapper
+      // allocate memory for mapper
       req.type  = KMEM_MAPPER;
       req.size  = sizeof(mapper_t);
...
       // initialize radix tree
-      error = grdxt_init( &mapper->radix,
-                          CONFIG_VMM_GRDXT_W1,
-                          CONFIG_VMM_GRDXT_W2,
-                          CONFIG_VMM_GRDXT_W3 );
+      error = grdxt_init( &mapper->rt,
+                          CONFIG_MAPPER_GRDXT_W1,
+                          CONFIG_MAPPER_GRDXT_W2,
+                          CONFIG_MAPPER_GRDXT_W3 );

       if( error )
...
       // initialize mapper lock
-      rwlock_init( &mapper->lock , LOCK_MAPPER_STATE );
+      remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );

       // initialize waiting threads xlist (empty)
...
   }  // end mapper_create()

-  ///////////////////////////////////////////
-  error_t mapper_destroy( mapper_t * mapper )
+  ////////////////////////////////////////
+  void mapper_destroy( mapper_t * mapper )
   {
       page_t   * page;
...
       uint32_t   start_index = 0;
       kmem_req_t req;
-      error_t    error;

-      // scan radix three and release all registered pages to PPM
+      // scan radix tree
       do
       {
           // get page from radix tree
-          page = (page_t *)grdxt_get_first( &mapper->radix , start_index , &found_index );
-
+          page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
+
+          // release registered pages to PPM
           if( page != NULL )
           {
               // remove page from mapper and release to PPM
-              error = mapper_release_page( mapper , page );
-
-              if ( error ) return error;
+              mapper_release_page( mapper , page );

               // update start_key value for next page
...
       while( page != NULL );

-      // release the memory allocated to radix-tree itself
-      grdxt_destroy( &mapper->radix );
+      // release the memory allocated to radix tree itself
+      grdxt_destroy( &mapper->rt );

       // release memory for mapper descriptor
...
       kmem_free( &req );

-      return 0;
-
   }  // end mapper_destroy()

-  ////////////////////////////////////////////
-  page_t * mapper_get_page( mapper_t * mapper,
-                            uint32_t   index )
-  {
-      kmem_req_t req;
-      page_t   * page;
-      error_t    error;
+  ////////////////////////////////////////////////////
+  xptr_t  mapper_remote_get_page( xptr_t    mapper_xp,
+                                  uint32_t  page_id )
+  {
+      error_t    error;
+      mapper_t * mapper_ptr;
+      cxy_t      mapper_cxy;
+      xptr_t     lock_xp;     // extended pointer on mapper lock
+      xptr_t     page_xp;     // extended pointer on searched page descriptor
+      xptr_t     rt_xp;       // extended pointer on radix tree in mapper
+
+      thread_t * this = CURRENT_THREAD;
+
+      // get mapper cluster and local pointer
+      mapper_ptr = GET_PTR( mapper_xp );
+      mapper_cxy = GET_CXY( mapper_xp );

   #if DEBUG_MAPPER_GET_PAGE
   uint32_t cycle = (uint32_t)hal_get_cycles();
+  char          name[CONFIG_VFS_MAX_NAME_LENGTH];
+  vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
+  vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
   if( DEBUG_MAPPER_GET_PAGE < cycle )
-  printk("\n[DBG] %s : thread %x enter for page %d / mapper %x / cycle %d\n",
-  __FUNCTION__ , CURRENT_THREAD , index , mapper , cycle );
+  printk("\n[%s] thread [%x,%x] enter for page %d of <%s> / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, page_id, name, cycle );
   #endif
-
-      thread_t * this = CURRENT_THREAD;

       // check thread can yield
       thread_assert_can_yield( this , __FUNCTION__ );

+      // build extended pointers on mapper lock and mapper rt
+      lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );
+      rt_xp   = XPTR( mapper_cxy , &mapper_ptr->rt );
+
       // take mapper lock in READ_MODE
-      rwlock_rd_acquire( &mapper->lock );
+      remote_rwlock_rd_acquire( lock_xp );

       // search page in radix tree
-      page = (page_t *)grdxt_lookup( &mapper->radix , index );
-
-      // test if page available in mapper
-      if( ( page == NULL) || page_is_flag( page , PG_INLOAD ) )  // page not available
+      page_xp = grdxt_remote_lookup( rt_xp , page_id );
+
+      // test mapper miss
+      if( page_xp == XPTR_NULL )  // miss => try to handle it
       {
           // release the lock in READ_MODE and take it in WRITE_MODE
-          rwlock_rd_release( &mapper->lock );
-          rwlock_wr_acquire( &mapper->lock );
-
-          // second test on missing page because the page status can have been modified
+          remote_rwlock_rd_release( lock_xp );
+          remote_rwlock_wr_acquire( lock_xp );
+
+          // second test on missing page because the page status can be modified
           // by another thread, when passing from READ_MODE to WRITE_MODE.
           // from this point there is no concurrent accesses to mapper.
-
-          page = grdxt_lookup( &mapper->radix , index );
-
-          if ( page == NULL )  // missing page => create it and load it from file system
+          page_xp = grdxt_remote_lookup( rt_xp , page_id );
+
+          if ( page_xp == XPTR_NULL )  // miss confirmed => handle it
           {

   #if (DEBUG_MAPPER_GET_PAGE & 1)
   if( DEBUG_MAPPER_GET_PAGE < cycle )
-  printk("\n[DBG] %s : missing page => load from device\n", __FUNCTION__ );
+  printk("\n[%s] missing page => load it from IOC device\n", __FUNCTION__ );
   #endif
-              // allocate one page from PPM
-              req.type  = KMEM_PAGE;
-              req.size  = 0;
-              req.flags = AF_NONE;
-              page      = kmem_alloc( &req );
-
-              if( page == NULL )
-              {
-                  printk("\n[ERROR] in %s : thread %x cannot allocate a page in cluster %x\n",
-                  __FUNCTION__ , this->trdid , local_cxy );
-                  rwlock_wr_release( &mapper->lock );
-                  return NULL;
-              }
-
-              // initialize the page descriptor
-              page_init( page );
-              page_set_flag( page , PG_INIT | PG_INLOAD );
-              page_refcount_up( page );
-              page->mapper = mapper;
-              page->index  = index;
-
-              // insert page in mapper radix tree
-              error = grdxt_insert( &mapper->radix, index , page );
-
-              // release mapper lock from WRITE_MODE
-              rwlock_wr_release( &mapper->lock );
-
-              if( error )
-              {
-                  printk("\n[ERROR] in %s : thread %x cannot insert page in mapper\n",
-                  __FUNCTION__ , this->trdid );
-                  mapper_release_page( mapper , page );
-                  page_clear_flag( page , PG_ALL );
-                  req.ptr  = page;
-                  req.type = KMEM_PAGE;
-                  kmem_free(&req);
-                  return NULL;
-              }
-
-              // launch I/O operation to load page from file system
-              error = vfs_mapper_move_page( page,
-                                            true );   // to mapper
-              if( error )
-              {
-                  printk("\n[ERROR] in %s : thread %x cannot load page from device\n",
-                  __FUNCTION__ , this->trdid );
-                  mapper_release_page( mapper , page );
-                  page_clear_flag( page , PG_ALL );
-                  req.ptr  = page;
-                  req.type = KMEM_PAGE;
-                  kmem_free( &req );
-                  return NULL;
-              }
-
-              // reset the page INLOAD flag to make the page available to all readers
-              page_clear_flag( page , PG_INLOAD );
-          }
-          else if( page_is_flag( page , PG_INLOAD ) )  // page is loaded by another thread
-          {
-              // release mapper lock from WRITE_MODE
-              rwlock_wr_release( &mapper->lock );
-
-              // wait load completion
-              while( page_is_flag( page , PG_INLOAD ) == false )
-              {
-                  // deschedule without blocking
-                  sched_yield("waiting page loading");
-              }
-          }
-      }
-      else   // page available in mapper
-      {
-          rwlock_rd_release( &mapper->lock );
+              if( mapper_cxy == local_cxy )   // mapper is local
+              {
+                  error = mapper_handle_miss( mapper_ptr,
+                                              page_id,
+                                              &page_xp );
+              }
+              else                            // mapper is remote
+              {
+                  rpc_mapper_handle_miss_client( mapper_cxy,
+                                                 mapper_ptr,
+                                                 page_id,
+                                                 &page_xp,
+                                                 &error );
+              }
+
+              if ( error )
+              {
+                  printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
+                  __FUNCTION__ , this->process->pid, this->trdid );
+                  remote_rwlock_wr_release( lock_xp );
+                  return XPTR_NULL;
+              }
+          }
+
+          // release mapper lock from WRITE_MODE
+          remote_rwlock_wr_release( lock_xp );
+      }
+      else   // hit
+      {
+          // release mapper lock from READ_MODE
+          remote_rwlock_rd_release( lock_xp );
       }

   #if DEBUG_MAPPER_GET_PAGE
   cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_MAPPER_GET_PAGE < cycle )
-  printk("\n[DBG] %s : thread %x exit for page %d / ppn %x / cycle %d\n",
-  __FUNCTION__, CURRENT_THREAD, index, ppm_page2ppn(XPTR(local_cxy, page)), cycle );
+  printk("\n[%s] thread[%x,%x] exit for page %d of <%s> / ppn %x / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid,
+  page_id, name, ppm_page2ppn( page_xp ), cycle );
   #endif

-      return page;
-
-  }  // end mapper_get_page()
+      return page_xp;
+
+  }  // end mapper_remote_get_page()

+  //////////////////////////////////////////////
+  error_t mapper_handle_miss( mapper_t * mapper,
+                              uint32_t   page_id,
+                              xptr_t   * page_xp )
+  {
+      kmem_req_t req;
+      page_t   * page;
+      error_t    error;
+
+      thread_t * this = CURRENT_THREAD;
+
+  #if DEBUG_MAPPER_HANDLE_MISS
+  uint32_t      cycle = (uint32_t)hal_get_cycles();
+  char          name[CONFIG_VFS_MAX_NAME_LENGTH];
+  vfs_inode_t * inode = mapper->inode;
+  vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
+  if( DEBUG_MAPPER_HANDLE_MISS < cycle )
+  printk("\n[%s] enter for page %d in <%s> / cycle %d\n",
+  __FUNCTION__, page_id, name, cycle );
+  if( DEBUG_MAPPER_HANDLE_MISS & 1 )
+  grdxt_display( &mapper->rt , name );
+  #endif
+
+      // allocate one page from the mapper cluster
+      req.type  = KMEM_PAGE;
+      req.size  = 0;
+      req.flags = AF_NONE;
+      page      = kmem_alloc( &req );
+
+      if( page == NULL )
+      {
+          printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
+          __FUNCTION__ , this->process->pid, this->trdid , local_cxy );
+          return -1;
+      }
+
+      // initialize the page descriptor
+      page_init( page );
+      page_set_flag( page , PG_INIT );
+      page_refcount_up( page );
+      page->mapper = mapper;
+      page->index  = page_id;
+
+      // insert page in mapper radix tree
+      error = grdxt_insert( &mapper->rt , page_id , page );
+
+      if( error )
+      {
+          printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n",
+          __FUNCTION__ , this->process->pid, this->trdid );
+          mapper_release_page( mapper , page );
+          req.ptr  = page;
+          req.type = KMEM_PAGE;
+          kmem_free(&req);
+          return -1;
+      }
+
+      // launch I/O operation to load page from device to mapper
+      error = vfs_fs_move_page( XPTR( local_cxy , page ) , true );
+
+      if( error )
+      {
+          printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
+          __FUNCTION__ , this->process->pid, this->trdid );
+          mapper_release_page( mapper , page );
+          req.ptr  = page;
+          req.type = KMEM_PAGE;
+          kmem_free( &req );
+          return -1;
+      }
+
+      // set extended pointer on allocated page
+      *page_xp = XPTR( local_cxy , page );
+
+  #if DEBUG_MAPPER_HANDLE_MISS
+  cycle = (uint32_t)hal_get_cycles();
+  if( DEBUG_MAPPER_HANDLE_MISS < cycle )
+  printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d\n",
+  __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );
+  if( DEBUG_MAPPER_HANDLE_MISS & 1 )
+  grdxt_display( &mapper->rt , name );
+  #endif
+
+      return 0;
+
+  }  // end mapper_handle_miss()

-  ///////////////////////////////////////////////
-  error_t mapper_release_page( mapper_t * mapper,
-                               page_t   * page )
-  {
-      error_t error;
-
-      // lauch IO operation to update page to file system
-      error = vfs_mapper_move_page( page , false );   // from mapper
-
-      if( error )
-      {
-          printk("\n[ERROR] in %s : cannot update file system\n", __FUNCTION__ );
-          return EIO;
-      }
+  ////////////////////////////////////////////
+  void mapper_release_page( mapper_t * mapper,
+                            page_t   * page )
+  {
+      // build extended pointer on mapper lock
+      xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );

       // take mapper lock in WRITE_MODE
-      rwlock_wr_acquire( &mapper->lock );
+      remote_rwlock_wr_acquire( mapper_lock_xp );

       // remove physical page from radix tree
-      grdxt_remove( &mapper->radix , page->index );
+      grdxt_remove( &mapper->rt , page->index );

       // release mapper lock from WRITE_MODE
-      rwlock_wr_release( &mapper->lock );
+      remote_rwlock_wr_release( mapper_lock_xp );

       // release page to PPM
...
       kmem_free( &req );

-      return 0;
-
   }  // end mapper_release_page()

   ////////////////////////////////////////////
   error_t mapper_move_user( mapper_t * mapper,
                             bool_t     to_buffer,
...
                             uint32_t   size )
   {
+      xptr_t     mapper_xp;    // extended pointer on local mapper
       uint32_t   page_offset;  // first byte to move to/from a mapper page
       uint32_t   page_count;   // number of bytes to move to/from a mapper page
-      uint32_t   index;        // current mapper page index
+      uint32_t   page_id;      // current mapper page index
       uint32_t   done;         // number of moved bytes
-      page_t   * page;         // current mapper page descriptor
-      uint8_t  * map_ptr;      // current mapper address
-      uint8_t  * buf_ptr;      // current buffer address
+      xptr_t     page_xp;      // extended pointer on current mapper page descriptor

   #if DEBUG_MAPPER_MOVE_USER
-  uint32_t cycle = (uint32_t)hal_get_cycles();
+  uint32_t   cycle = (uint32_t)hal_get_cycles();
+  thread_t * this  = CURRENT_THREAD;
   if( DEBUG_MAPPER_MOVE_USER < cycle )
-  printk("\n[DBG] %s : thread %x enter / to_buf %d / buffer %x / cycle %d\n",
-  __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
-  #endif
+  printk("\n[%s] thread[%x,%x] : to_buf %d / buffer %x / size %d / offset %d / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid,
+  to_buffer, buffer, size, file_offset, cycle );
+  #endif
+
+      // build extended pointer on mapper
+      mapper_xp = XPTR( local_cxy , mapper );

       // compute offsets of first and last bytes in file
       uint32_t min_byte = file_offset;
-      uint32_t max_byte = file_offset + size -1;
+      uint32_t max_byte = file_offset + size - 1;

       // compute indexes of pages for first and last byte in mapper
...
       uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

-      done = 0;
-
-      // loop on pages in mapper
-      for( index = first ; index <= last ; index++ )
-      {
-          // compute page_offset
-          if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
-          else                 page_offset = 0;
-
-          // compute number of bytes in page
-          if      ( first == last  ) page_count = size;
-          else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
-          else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
-          else                       page_count = CONFIG_PPM_PAGE_SIZE;
-
   #if (DEBUG_MAPPER_MOVE_USER & 1)
   if( DEBUG_MAPPER_MOVE_USER < cycle )
-  printk("\n[DBG] %s : index = %d / offset = %d / count = %d\n",
-  __FUNCTION__ , index , page_offset , page_count );
-  #endif
-
-          // get page descriptor
-          page = mapper_get_page( mapper , index );
-
-          if ( page == NULL ) return EINVAL;
+  printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
+  #endif
+
+      done = 0;
+
+      // loop on pages in mapper
+      for( page_id = first ; page_id <= last ; page_id++ )
+      {
+          // compute page_offset
+          if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
+          else                   page_offset = 0;
+
+          // compute number of bytes in page
+          if      ( first == last    ) page_count = size;
+          else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
+          else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
+          else                         page_count = CONFIG_PPM_PAGE_SIZE;
+
+  #if (DEBUG_MAPPER_MOVE_USER & 1)
+  if( DEBUG_MAPPER_MOVE_USER < cycle )
+  printk("\n[%s] page_id = %d / page_offset = %d / page_count = %d\n",
+  __FUNCTION__ , page_id , page_offset , page_count );
+  #endif
+
+          // get extended pointer on page descriptor
+          page_xp = mapper_remote_get_page( mapper_xp , page_id );
+
+          if ( page_xp == XPTR_NULL ) return -1;

           // compute pointer in mapper
-          xptr_t base_xp = ppm_page2base( XPTR( local_cxy, page ) );
-          map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;
+          xptr_t    base_xp = ppm_page2base( page_xp );
+          uint8_t * map_ptr = (uint8_t *)GET_PTR( base_xp ) + page_offset;

           // compute pointer in buffer
-          buf_ptr = (uint8_t *)buffer + done;
+          uint8_t * buf_ptr = (uint8_t *)buffer + done;

           // move fragment
           if( to_buffer )
           {
               hal_copy_to_uspace( buf_ptr , map_ptr , page_count );
           }
           else
           {
-              ppm_page_do_dirty( page );
+              ppm_page_do_dirty( page_xp );
               hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
           }
...
   #if DEBUG_MAPPER_MOVE_USER
   cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_MAPPER_MOVE_USER < cycle )
-  printk("\n[DBG] %s : thread %x exit / to_buf %d / buffer %x / cycle %d\n",
-  __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer , cycle );
+  printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, cycle );
   #endif
...
   }  // end mapper_move_user()

   ////////////////////////////////////////////////
-  error_t mapper_move_kernel( mapper_t * mapper,
-                              bool_t     to_buffer,
-                              uint32_t   file_offset,
-                              xptr_t     buffer_xp,
-                              uint32_t   size )
+  error_t mapper_move_kernel( xptr_t    mapper_xp,
+                              bool_t    to_buffer,
+                              uint32_t  file_offset,
+                              xptr_t    buffer_xp,
+                              uint32_t  size )
   {
       uint32_t   page_offset;  // first byte to move to/from a mapper page
       uint32_t   page_count;   // number of bytes to move to/from a mapper page
-      uint32_t   index;        // current mapper page index
+      uint32_t   page_id;      // current mapper page index
       uint32_t   done;         // number of moved bytes
-      page_t   * page;         // current mapper page descriptor
+      xptr_t     page_xp;      // extended pointer on current mapper page descriptor

       uint8_t  * src_ptr;      // source buffer local pointer
...
       // get buffer cluster and local pointer
       cxy_t     buffer_cxy = GET_CXY( buffer_xp );
-      uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
+      uint8_t * buffer_ptr = GET_PTR( buffer_xp );
+
+      // get mapper cluster
+      cxy_t mapper_cxy = GET_CXY( mapper_xp );

   #if DEBUG_MAPPER_MOVE_KERNEL
-  uint32_t cycle = (uint32_t)hal_get_cycles();
+  uint32_t   cycle = (uint32_t)hal_get_cycles();
+  thread_t * this  = CURRENT_THREAD;
   if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-  printk("\n[DBG] %s : thread %x enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
-  __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
+  printk("\n[%s] thread[%x,%x] enter / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
   #endif
...
   #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
   if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-  printk("\n[DBG] %s : first_page %d / last_page %d\n", __FUNCTION__, first, last );
+  printk("\n[%s] first_page %d / last_page %d\n", __FUNCTION__, first, last );
   #endif

       // compute source and destination clusters
       if( to_buffer )
       {
           dst_cxy = buffer_cxy;
-          src_cxy = local_cxy;
+          src_cxy = mapper_cxy;
       }
       else
       {
           src_cxy = buffer_cxy;
-          dst_cxy = local_cxy;
+          dst_cxy = mapper_cxy;
       }
...
       // loop on pages in mapper
-      for( index = first ; index <= last ; index++ )
+      for( page_id = first ; page_id <= last ; page_id++ )
       {
           // compute page_offset
-          if( index == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
-          else                 page_offset = 0;
+          if( page_id == first ) page_offset = min_byte & CONFIG_PPM_PAGE_MASK;
+          else                   page_offset = 0;

           // compute number of bytes to move in page
-          if      ( first == last  ) page_count = size;
-          else if ( index == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
-          else if ( index == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
-          else                       page_count = CONFIG_PPM_PAGE_SIZE;
+          if      ( first == last    ) page_count = size;
+          else if ( page_id == first ) page_count = CONFIG_PPM_PAGE_SIZE - page_offset;
+          else if ( page_id == last  ) page_count = (max_byte & CONFIG_PPM_PAGE_MASK) + 1;
+          else                         page_count = CONFIG_PPM_PAGE_SIZE;

   #if (DEBUG_MAPPER_MOVE_KERNEL & 1)
   if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-  printk("\n[DBG] %s : page_index = %d / offset = %d / bytes = %d\n",
-  __FUNCTION__ , index , page_offset , page_count );
-  #endif
-
-          // get page descriptor
-          page = mapper_get_page( mapper , index );
-
-          if ( page == NULL ) return EINVAL;
+  printk("\n[%s] page_id = %d / offset = %d / bytes = %d\n",
+  __FUNCTION__ , page_id , page_offset , page_count );
+  #endif
+
+          // get extended pointer on page descriptor
+          page_xp = mapper_remote_get_page( mapper_xp , page_id );
+
+          if ( page_xp == XPTR_NULL ) return -1;

           // get page base address
-          xptr_t    base_xp  = ppm_page2base( XPTR( local_cxy , page ) );
+          xptr_t    base_xp  = ppm_page2base( page_xp );
           uint8_t * base_ptr = (uint8_t *)GET_PTR( base_xp );
...
               dst_ptr = base_ptr + page_offset;

-              ppm_page_do_dirty( page );
+              ppm_page_do_dirty( page_xp );
           }
...
   #if DEBUG_MAPPER_MOVE_KERNEL
   cycle = (uint32_t)hal_get_cycles();
   if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
-  printk("\n[DBG] %s : thread %x exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
-  __FUNCTION__ , CURRENT_THREAD , to_buffer , buffer_cxy , buffer_ptr , cycle );
+  printk("\n[%s] thread[%x,%x] exit / to_buf %d / buf_cxy %x / buf_ptr %x / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, to_buffer, buffer_cxy, buffer_ptr, cycle );
   #endif
...
   }  // end mapper_move_kernel()

+  ///////////////////////////////////////////////////
+  error_t mapper_remote_get_32( xptr_t     mapper_xp,
+                                uint32_t   word_id,
+                                uint32_t * p_value )
+  {
+      uint32_t  page_id;   // page index in file
+      uint32_t  local_id;  // word index in page
+      xptr_t    page_xp;   // extended pointer on searched page descriptor
+      xptr_t    base_xp;   // extended pointer on searched page base
+
+      // get page index and local word index
+      page_id  = word_id >> 10;
+      local_id = word_id & 0x3FF;
+
+      // get page containing the searched word
+      page_xp = mapper_remote_get_page( mapper_xp , page_id );
+
+      if( page_xp == XPTR_NULL ) return -1;
+
+      // get page base
+      base_xp = ppm_page2base( page_xp );
+
+      // get the value from mapper
+      *p_value = hal_remote_l32( base_xp + (local_id<<2) );
+
+      return 0;
+
+  }  // end mapper_remote_get_32()
+
+  ///////////////////////////////////////////////////
+  error_t mapper_remote_set_32( xptr_t    mapper_xp,
+                                uint32_t  word_id,
+                                uint32_t  value )
+  {
+      uint32_t  page_id;   // page index in file
+      uint32_t  local_id;  // word index in page
+      xptr_t    page_xp;   // extended pointer on searched page descriptor
+      xptr_t    base_xp;   // extended pointer on searched page base
+
+      // get page index and local word index
+      page_id  = word_id >> 10;
+      local_id = word_id & 0x3FF;
+
+      // get page containing the searched word
+      page_xp = mapper_remote_get_page( mapper_xp , page_id );
+
+      if( page_xp == XPTR_NULL ) return -1;
+
+      // get page base
+      base_xp = ppm_page2base( page_xp );
+
+      // set value to mapper
+      hal_remote_s32( (base_xp + (local_id << 2)) , value );
+
+      // set the dirty flag
+      ppm_page_do_dirty( page_xp );
+
+      return 0;
+
+  }  // end mapper_remote_set_32()
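The new access path is easier to see from the caller's side. The sketch below is
not part of the changeset: it shows how a thread in any cluster could read one
aligned 32-bit word from a file cached by a possibly remote mapper, using only
primitives visible in this diff (mapper_remote_get_page(), ppm_page2base(),
hal_remote_l32()). The helper name and the word-alignment assumption are mine.

    // Hypothetical helper, not in the changeset: read one aligned 32-bit word
    // at byte offset <offset> in the file cached by the mapper <mapper_xp>.
    static error_t demo_read_word( xptr_t     mapper_xp,  // extended pointer on mapper
                                   uint32_t   offset,     // byte offset in file (word aligned)
                                   uint32_t * value )     // [out] local buffer
    {
        // split the file offset in a page index and an offset in page
        uint32_t page_id     = offset >> CONFIG_PPM_PAGE_SHIFT;
        uint32_t page_offset = offset &  CONFIG_PPM_PAGE_MASK;

        // get the page, loading it from the IOC device on a mapper miss;
        // the local / RPC decision is taken inside mapper_remote_get_page()
        xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if( page_xp == XPTR_NULL ) return -1;

        // read the word in the (possibly remote) page frame
        *value = hal_remote_l32( ppm_page2base( page_xp ) + page_offset );

        return 0;
    }

This is essentially what the new mapper_remote_get_32() does with word_id equal
to offset >> 2; note that the ">> 10" and "& 0x3FF" in its body imply 1024 words
per page, i.e. 4 Kbytes pages.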
trunk/kernel/mm/mapper.h (r513 -> r606)

   /*
-  * mapper.h - Map memory, file or device in process virtual address space.
+  * mapper.h - Kernel cache for FS files or directories definition.
    *
    * Authors   Mohamed Lamine Karaoui (2015)
...
   struct vfs_inode_s * inode;       /*! owner inode                                   */
   uint32_t             type;        /*! file system type                              */
-  grdxt_t              radix;       /*! pages cache implemented as a radix tree       */
-  rwlock_t             lock;        /*! several readers / only one writer             */
+  grdxt_t              rt;          /*! embedded pages cache descriptor (radix tree)  */
+  remote_rwlock_t      lock;        /*! several readers / only one writer             */
   uint32_t             refcount;    /*! several vsegs can refer the same file         */
   xlist_entry_t        vsegs_root;  /*! root of list of vsegs refering this mapper    */
...
   /*******************************************************************************************
-  * This function releases all physical pages allocated for the mapper.
-  * It synchronizes all dirty pages (i.e. update the file on disk) if required.
-  * The mapper descriptor and the radix tree themselves are released.
+  * This function releases all physical memory allocated for a mapper.
+  * Both the mapper descriptor and the radix tree are released.
+  * It does NOT synchronize dirty pages. Use the vfs_sync_inode() function if required.
   * It must be executed by a thread running in the cluster containing the mapper.
   *******************************************************************************************
   * @ mapper : target mapper.
-  * @ return 0 if success / return EIO if a dirty page cannot be updated on device.
-  ******************************************************************************************/
-  error_t mapper_destroy( mapper_t * mapper );
+  ******************************************************************************************/
+  void mapper_destroy( mapper_t * mapper );

+  /*******************************************************************************************
+  * This function loads from device a missing page identified by the <page_id> argument
+  * into the mapper identified by the <mapper> local pointer.
+  * It allocates a physical page from the local cluster, initializes it by accessing
+  * the device, and registers the page in the mapper radix tree.
+  * It must be executed by a thread running in the cluster containing the mapper.
+  * WARNING : the calling function mapper_remote_get_page() is supposed to take and
+  * release the lock protecting the mapper in WRITE_MODE.
+  *******************************************************************************************
+  * @ mapper  : [in]  target mapper.
+  * @ page_id : [in]  missing page index in file.
+  * @ page_xp : [out] buffer for extended pointer on missing page descriptor.
+  * @ return 0 if success / return -1 if a dirty page cannot be updated on device.
+  ******************************************************************************************/
+  error_t mapper_handle_miss( mapper_t * mapper,
+                              uint32_t   page_id,
+                              xptr_t   * page_xp );

   /*******************************************************************************************
-  * This function move data between a mapper and a - possibly distributed - user buffer.
-  * It must be called by a thread running in the cluster containing the mapper.
-  * It is called by the vfs_user_move() function to implement sys_read() and sys_write().
+  * This function moves data between a local mapper and a distributed user buffer.
+  * It must be called by a thread running in the cluster containing the mapper.
+  * It is called by vfs_user_move() to implement the sys_read() and sys_write() syscalls.
   * If required, the data transfer is split in "fragments", where one fragment contains
   * contiguous bytes in the same mapper page.
   * It uses "hal_uspace" accesses to move a fragment to/from the user buffer.
   * In case of write, the dirty bit is set for all pages written in the mapper.
-  * The offset in the file descriptor is not modified by this function.
+  * The mapper being an extendable cache, it is automatically extended when required
+  * for both read and write accesses.
+  * The "offset" field in the file descriptor, and the "size" field in inode descriptor
+  * are not modified by this function.
   *******************************************************************************************
   * @ mapper      : local pointer on mapper.
...
   * @ u_buf       : user space pointer on user buffer.
   * @ size        : number of bytes to move.
-  * returns O if success / returns EINVAL if error.
+  * returns 0 if success / returns -1 if error.
   ******************************************************************************************/
   error_t mapper_move_user( mapper_t * mapper,
...
                             uint32_t   size );

-  /*******************************************************************************************
-  * This function move data between a mapper and a remote kernel buffer.
-  * It must be called by a thread running in the cluster containing the mapper.
+  /********************************************************************************************
+  * This function moves data between a remote mapper and a remote kernel buffer.
+  * It can be called by a thread running in any cluster.
   * If required, the data transfer is split in "fragments", where one fragment contains
   * contiguous bytes in the same mapper page.
   * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer.
   * In case of write, the dirty bit is set for all pages written in the mapper.
-  * The offset in the file descriptor is not modified by this function.
   *******************************************************************************************
-  * @ mapper      : local pointer on mapper.
+  * @ mapper_xp   : extended pointer on mapper.
   * @ to_buffer   : mapper -> buffer if true / buffer -> mapper if false.
   * @ file_offset : first byte to move in file.
   * @ buffer_xp   : extended pointer on kernel buffer.
   * @ size        : number of bytes to move.
-  * returns O if success / returns EINVAL if error.
+  * returns 0 if success / returns -1 if error.
   ******************************************************************************************/
-  error_t mapper_move_kernel( mapper_t * mapper,
+  error_t mapper_move_kernel( xptr_t   mapper_xp,
                               bool_t   to_buffer,
                               uint32_t file_offset,
                               xptr_t   buffer_xp,
                               uint32_t size );

   /*******************************************************************************************
-  * This function removes a physical page from the mapper, update the FS if the page
-  * is dirty, and releases the page to PPM. It is called by the mapper_destroy() function.
+  * This function removes a physical page from the mapper, and releases
+  * the page to the local PPM. It is called by the mapper_destroy() function.
   * It must be executed by a thread running in the cluster containing the mapper.
-  * It takes both the page lock and the mapper lock in WRITE_MODE to release the page.
+  * It takes the mapper lock in WRITE_MODE to update the mapper.
   *******************************************************************************************
   * @ mapper : local pointer on the mapper.
   * @ page   : pointer on page to remove.
-  * @ return 0 if success / return EIO if a dirty page cannot be copied to FS.
-  ******************************************************************************************/
-  error_t mapper_release_page( mapper_t      * mapper,
-                               struct page_s * page );
+  ******************************************************************************************/
+  void mapper_release_page( mapper_t      * mapper,
+                            struct page_s * page );

   /*******************************************************************************************
-  * This function searches a physical page descriptor from its index in mapper.
-  * It must be executed by a thread running in the cluster containing the mapper.
-  * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing
-  * page from device to the mapper, and release the mapper lock.
-  *******************************************************************************************
-  * @ mapper  : local pointer on the mapper.
-  * @ index   : page index in file
-  * @ returns pointer on page descriptor if success / return NULL if error.
-  ******************************************************************************************/
-  struct page_s * mapper_get_page( mapper_t * mapper,
-                                   uint32_t   index );
+  * This function returns an extended pointer on a mapper page, identified by <page_id>,
+  * index in the file. The - possibly remote - mapper is identified by the <mapper_xp>
+  * argument. It can be executed by a thread running in any cluster, as it uses remote
+  * access primitives to scan the mapper.
+  * In case of miss, this function takes the mapper lock in WRITE_MODE, and calls
+  * mapper_handle_miss() to load the missing page from device to mapper, using an RPC
+  * when the mapper is remote.
+  *******************************************************************************************
+  * @ mapper_xp : extended pointer on the mapper.
+  * @ page_id   : page index in file.
+  * @ returns extended pointer on page base if success / return XPTR_NULL if error.
+  ******************************************************************************************/
+  xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
+                                 uint32_t  page_id );
+
+  /*******************************************************************************************
+  * This function allows to read a single word in a mapper seen as an array of uint32_t.
+  * It has been designed to support remote access to the FAT mapper of the FATFS.
+  * It can be called by any thread running in any cluster.
+  * In case of miss, it takes the mapper lock in WRITE_MODE, loads the missing
+  * page from device to mapper, and releases the mapper lock.
+  *******************************************************************************************
+  * @ mapper_xp : [in]  extended pointer on the mapper.
+  * @ word_id   : [in]  32 bits word index in file.
+  * @ p_value   : [out] local pointer on destination buffer.
+  * @ returns 0 if success / return -1 if error.
+  ******************************************************************************************/
+  error_t mapper_remote_get_32( xptr_t     mapper_xp,
+                                uint32_t   word_id,
+                                uint32_t * p_value );
+
+  /*******************************************************************************************
+  * This function allows to write a single word to a mapper seen as an array of uint32_t.
+  * It has been designed to support remote access to the FAT mapper of the FATFS.
+  * It can be called by any thread running in any cluster.
+  * In case of miss, it takes the mapper lock in WRITE_MODE, loads the missing
+  * page from device to mapper, and releases the mapper lock.
+  *******************************************************************************************
+  * @ mapper_xp : [in] extended pointer on the mapper.
+  * @ word_id   : [in] 32 bits word index in file.
+  * @ value     : [in] value to be written.
+  * @ returns 0 if success / return -1 if error.
+  ******************************************************************************************/
+  error_t mapper_remote_set_32( xptr_t    mapper_xp,
+                                uint32_t  word_id,
+                                uint32_t  value );

   #endif /* _MAPPER_H_ */
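As a usage illustration of the two word-oriented accessors, here is a sketch,
not changeset code: the fat_mapper_xp argument is assumed to be obtained from
the FATFS context, and the helper name is hypothetical.

    // Sketch: update one 32-bit FAT slot through the new word-oriented API.
    static error_t demo_update_fat_slot( xptr_t   fat_mapper_xp,  // assumed: FAT mapper
                                         uint32_t slot,           // word index in FAT
                                         uint32_t value )         // new slot value
    {
        uint32_t old;

        // read the current value (loads the containing page on a mapper miss)
        if( mapper_remote_get_32( fat_mapper_xp , slot , &old ) ) return -1;

        // write the new value: mapper_remote_set_32() also calls
        // ppm_page_do_dirty(), so the page will be pushed to the IOC
        // device by the next ppm_sync_dirty_pages()
        if( mapper_remote_set_32( fat_mapper_xp , slot , value ) ) return -1;

        return 0;
    }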
trunk/kernel/mm/page.h (r567 -> r606)

   /*************************************************************************************
   * This defines the flags that can be attached to a physical page.
-  * TODO : the PG_BUFFER and PG_IO_ERR flags semantic is not defined
+  * TODO : the PG_BUFFER and PG_IO_ERR flags semantic is not defined [AG]
   ************************************************************************************/
...
   #define PG_RESERVED   0x0002   // cannot be allocated by PPM
   #define PG_FREE       0x0004   // page can be allocated by PPM
-  #define PG_INLOAD     0x0008   // on-going load from disk
   #define PG_IO_ERR     0x0010   // mapper signals access error   TODO ??? [AG]
   #define PG_BUFFER     0x0020   // used in blockio.c             TODO ??? [AG]
...
   #define PG_COW        0x0080   // page is copy-on-write

-  #define PG_ALL        0xFFFF   // All flags
-
   /*************************************************************************************
   * This structure defines a physical page descriptor.
-  * The busylock is used to test/modify the forks counter.
-  * NOTE: Size is 44 bytes for a 32 bits core...
-  * TODO : the refcount use has to be clarified [AG]
+  * - The remote_busylock is used to allow any remote thread to atomically
+  *   test/modify the forks counter or the page flags.
+  * - The list entry is used to register the page in a free list or in dirty list.
+  * NOTE: Size is 48 bytes for a 32 bits core.
+  * TODO : the refcount use is not defined [AG]
   ************************************************************************************/
...
   uint32_t          refcount;   /*! reference counter TODO ??? [AG]        (4)  */
   uint32_t          forks;      /*! number of pending forks                (4)  */
-  remote_busylock_t lock;       /*! protect all accesses to page           (12) */
+  remote_busylock_t lock;       /*! protect forks or flags modifs          (16) */
   }
   page_t;
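The new comment on the remote_busylock suggests the following access discipline;
a minimal sketch (the helper is hypothetical, only the lock and the remote
accessors come from the kernel as shown elsewhere in this changeset):

    // Sketch: atomically increment the forks counter of a possibly remote page.
    static void demo_page_fork_up( xptr_t page_xp )
    {
        page_t * page_ptr = GET_PTR( page_xp );
        cxy_t    page_cxy = GET_CXY( page_xp );

        // the remote_busylock serializes all test/modify accesses
        // to the forks counter and to the flags field
        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );

        remote_busylock_acquire( lock_xp );
        hal_remote_s32( forks_xp , hal_remote_l32( forks_xp ) + 1 );
        remote_busylock_release( lock_xp );
    }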
trunk/kernel/mm/ppm.c (r585 -> r606)

   #include <mapper.h>
   #include <ppm.h>
+  #include <vfs.h>

   ////////////////////////////////////////////////////////////////////////////////////////
...
   //////////////////////////////////////////////////////////////////////////////////////

-  /////////////////////////////////////////
-  bool_t ppm_page_do_dirty( page_t * page )
+  //////////////////////////////////////////
+  bool_t ppm_page_do_dirty( xptr_t page_xp )
   {
       bool_t done = false;

+      // get page cluster and local pointer
+      page_t * page_ptr = GET_PTR( page_xp );
+      cxy_t    page_cxy = GET_CXY( page_xp );
+
+      // get local pointer on PPM (same in all clusters)
       ppm_t * ppm = &LOCAL_CLUSTER->ppm;

-      // lock the PPM dirty_list
-      queuelock_acquire( &ppm->dirty_lock );
-
-      if( !page_is_flag( page , PG_DIRTY ) )
+      // build extended pointers on page lock, page flags, and PPM dirty list lock
+      xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+      xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
+      xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
+
+      // lock the remote PPM dirty_list
+      remote_queuelock_acquire( dirty_lock_xp );
+
+      // lock the remote page
+      remote_busylock_acquire( page_lock_xp );
+
+      // get remote page flags
+      uint32_t flags = hal_remote_l32( page_flags_xp );
+
+      if( (flags & PG_DIRTY) == 0 )
       {
           // set dirty flag in page descriptor
-          page_set_flag( page , PG_DIRTY );
-
-          // register page in PPM dirty list
-          list_add_first( &ppm->dirty_root , &page->list );
+          hal_remote_s32( page_flags_xp , flags | PG_DIRTY );
+
+          // The PPM dirty list is a LOCAL list !!!
+          // We must update 4 pointers to insert a new page in this list.
+          // We can use the standard LIST API when the page is local,
+          // but we cannot use the standard API if the page is remote...
+
+          if( page_cxy == local_cxy )   // locally update the PPM dirty list
+          {
+              list_add_first( &ppm->dirty_root , &page_ptr->list );
+          }
+          else                          // remotely update the PPM dirty list
+          {
+              // get local and remote pointers on "root" list entry
+              list_entry_t * root    = &ppm->dirty_root;
+              xptr_t         root_xp = XPTR( page_cxy , root );
+
+              // get local and remote pointers on "page" list entry
+              list_entry_t * list    = &page_ptr->list;
+              xptr_t         list_xp = XPTR( page_cxy , list );
+
+              // get local and remote pointers on first dirty page
+              list_entry_t * dirt    = hal_remote_lpt( XPTR( page_cxy, &root->next ) );
+              xptr_t         dirt_xp = XPTR( page_cxy , dirt );
+
+              // set root.next, list.next, list.pred, dirt.pred in remote cluster
+              hal_remote_spt( root_xp                    , list );
+              hal_remote_spt( list_xp                    , dirt );
+              hal_remote_spt( list_xp + sizeof(intptr_t) , root );
+              hal_remote_spt( dirt_xp + sizeof(intptr_t) , list );
+          }
+
           done = true;
       }

-      // unlock the PPM dirty_list
-      queuelock_release( &ppm->dirty_lock );
+      // unlock the remote page
+      remote_busylock_release( page_lock_xp );
+
+      // unlock the remote PPM dirty_list
+      remote_queuelock_release( dirty_lock_xp );

       return done;
-  }
-
-  ///////////////////////////////////////////
-  bool_t ppm_page_undo_dirty( page_t * page )
+
+  }  // end ppm_page_do_dirty()
+
+  ////////////////////////////////////////////
+  bool_t ppm_page_undo_dirty( xptr_t page_xp )
   {
       bool_t done = false;

+      // get page cluster and local pointer
+      page_t * page_ptr = GET_PTR( page_xp );
+      cxy_t    page_cxy = GET_CXY( page_xp );
+
+      // get local pointer on PPM (same in all clusters)
       ppm_t * ppm = &LOCAL_CLUSTER->ppm;

-      // lock the dirty_list
-      queuelock_acquire( &ppm->dirty_lock );
-
-      if( page_is_flag( page , PG_DIRTY) )
-      {
-          // clear dirty flag in page descriptor
-          page_clear_flag( page , PG_DIRTY );
-
-          // remove page from PPM dirty list
-          list_unlink( &page->list );
+      // build extended pointers on page lock, page flags, and PPM dirty list lock
+      xptr_t page_lock_xp  = XPTR( page_cxy , &page_ptr->lock );
+      xptr_t page_flags_xp = XPTR( page_cxy , &page_ptr->flags );
+      xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock );
+
+      // lock the remote PPM dirty_list
+      remote_queuelock_acquire( XPTR( page_cxy , &ppm->dirty_lock ) );
+
+      // lock the remote page
+      remote_busylock_acquire( page_lock_xp );
+
+      // get remote page flags
+      uint32_t flags = hal_remote_l32( page_flags_xp );
+
+      if( (flags & PG_DIRTY) )   // page is dirty
+      {
+          // reset dirty flag in page descriptor
+          hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );
+
+          // The PPM dirty list is a LOCAL list !!!
+          // We must update 4 pointers to remove a page from this list.
+          // We can use the standard LIST API when the page is local,
+          // but we cannot use the standard API if the page is remote...
+
+          if( page_cxy == local_cxy )   // locally update the PPM dirty list
+          {
+              list_unlink( &page_ptr->list );
+          }
+          else                          // remotely update the PPM dirty list
+          {
+              // get local and remote pointers on "page" list entry
+              list_entry_t * list    = &page_ptr->list;
+              xptr_t         list_xp = XPTR( page_cxy , list );
+
+              // get local and remote pointers on "next" page list entry
+              list_entry_t * next    = hal_remote_lpt( list_xp );
+              xptr_t         next_xp = XPTR( page_cxy , next );
+
+              // get local and remote pointers on "pred" page list entry
+              list_entry_t * pred    = hal_remote_lpt( list_xp + sizeof(intptr_t) );
+              xptr_t         pred_xp = XPTR( page_cxy , pred );
+
+              // set pred.next, list.next, list.pred, next.pred in remote cluster
+              hal_remote_spt( pred_xp                    , next );
+              hal_remote_spt( list_xp                    , NULL );
+              hal_remote_spt( list_xp + sizeof(intptr_t) , NULL );
+              hal_remote_spt( next_xp + sizeof(intptr_t) , pred );
+          }
+
           done = true;
       }

-      // unlock the dirty_list
-      queuelock_release( &ppm->dirty_lock );
+      // unlock the remote page
+      remote_busylock_release( page_lock_xp );
+
+      // unlock the remote PPM dirty_list
+      remote_queuelock_release( dirty_lock_xp );

       return done;
-  }
-
-  ///////////////////////////////
-  void ppm_sync_all_pages( void )
-  {
-      page_t * page;
-      ppm_t  * ppm = &LOCAL_CLUSTER->ppm;
+
+  }  // end ppm_page_undo_dirty()
+
+  /////////////////////////////////
+  void ppm_sync_dirty_pages( void )
+  {
+      ppm_t * ppm = &LOCAL_CLUSTER->ppm;
+
+      // get local pointer on PPM dirty_root
+      list_entry_t * dirty_root = &ppm->dirty_root;
+
+      // build extended pointer on PPM dirty_lock
+      xptr_t dirty_lock_xp = XPTR( local_cxy , &ppm->dirty_lock );

       // get the PPM dirty_list lock
-      queuelock_acquire( &ppm->dirty_lock );
+      remote_queuelock_acquire( dirty_lock_xp );

       while( !list_is_empty( &ppm->dirty_root ) )
       {
-          page = LIST_FIRST( &ppm->dirty_root , page_t , list );
+          page_t * page    = LIST_FIRST( dirty_root , page_t , list );
+          xptr_t   page_xp = XPTR( local_cxy , page );
+
+          // build extended pointer on page lock
+          xptr_t page_lock_xp = XPTR( local_cxy , &page->lock );

           // get the page lock
-          remote_busylock_acquire( XPTR( local_cxy , &page->lock ) );
+          remote_busylock_acquire( page_lock_xp );

           // sync the page
-          vfs_mapper_move_page( page , false );   // from mapper
+          vfs_fs_move_page( page_xp , false );    // from mapper to device

           // release the page lock
-          remote_busylock_release( XPTR( local_cxy , &page->lock ) );
+          remote_busylock_release( page_lock_xp );
       }

       // release the PPM dirty_list lock
-      queuelock_release( &ppm->dirty_lock );
-  }
+      remote_queuelock_release( dirty_lock_xp );
+
+  }  // end ppm_sync_dirty_pages()
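The four-pointer manipulation in the remote branches above is the generic
"insert first in a remote local list" pattern. Distilled as a stand-alone
helper (a sketch; it assumes, as the pointer arithmetic in the diff does, that
list_entry_t holds next at offset 0 and pred at offset sizeof(intptr_t)):

    // Sketch: insert <entry> as first element of the list rooted at <root>,
    // both located in remote cluster <cxy>.
    static void demo_remote_list_add_first( cxy_t          cxy,
                                            list_entry_t * root,
                                            list_entry_t * entry )
    {
        // get local pointer on the current first element in cluster <cxy>
        list_entry_t * first = hal_remote_lpt( XPTR( cxy , &root->next ) );

        hal_remote_spt( XPTR( cxy , &root->next ) , entry );               // root.next  = entry
        hal_remote_spt( XPTR( cxy , &entry->next ) , first );              // entry.next = first
        hal_remote_spt( XPTR( cxy , entry ) + sizeof(intptr_t) , root );   // entry.pred = root
        hal_remote_spt( XPTR( cxy , first ) + sizeof(intptr_t) , entry );  // first.pred = entry
    }

This presumably relies on the circular-list convention where an empty list has
root.next pointing back to root, so no empty-list special case is needed, which
is what the list_add_first() call in the local branch also assumes.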
trunk/kernel/mm/ppm.h (r567 -> r606)

   #include <list.h>
   #include <busylock.h>
-  #include <queuelock.h>
+  #include <remote_queuelock.h>
   #include <boot_info.h>
   #include <page.h>
...
   * contains an integer number of pages, defined by the <pages_nr> field in the
   * boot_info structure. It is split in three parts:
+  *
   * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
   *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
...
   * Another service is to register the dirty pages in a specific dirty_list, that is
   * also rooted in the PPM, in order to be able to save all dirty pages on disk.
-  * This dirty list is protected by a specific local queuelock.
+  * This dirty list is protected by a specific remote_queuelock, because it can be
+  * modified by a remote thread, but it is implemented as a local list, because it
+  * contains only local pages.
   ****************************************************************************************/

   typedef struct ppm_s
   {
-      busylock_t     free_lock;                              /*! lock protecting free_pages[] lists  */
-      list_entry_t   free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists                 */
-      uint32_t       free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! numbers of free pages               */
-      page_t       * pages_tbl;                              /*! pointer on page descriptors array   */
-      uint32_t       pages_nr;                               /*! total number of small physical page */
-      queuelock_t    dirty_lock;                             /*! lock protecting dirty pages list    */
-      list_entry_t   dirty_root;                             /*! root of dirty pages list            */
-      void         * vaddr_base;                             /*! pointer on local physical memory base */
+      busylock_t         free_lock;                              /*! lock protecting free_pages[] lists  */
+      list_entry_t       free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists                 */
+      uint32_t           free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! free pages number                   */
+      page_t           * pages_tbl;                              /*! pointer on page descriptors array   */
+      uint32_t           pages_nr;                               /*! total number of small physical page */
+      remote_queuelock_t dirty_lock;                             /*! lock protecting dirty pages list    */
+      list_entry_t       dirty_root;                             /*! root of dirty pages list            */
+      void             * vaddr_base;                             /*! pointer on local physical memory base */
   }
   ppm_t;
...
   inline xptr_t ppm_base2page( xptr_t base_xp );
...
   inline ppn_t ppm_base2ppn( xptr_t base_xp );
...

+  /*********** debug functions **********************************************************/

   /*****************************************************************************************
-  * This function registers a physical page as dirty.
+  * This function registers a page identified by the <page_xp> argument as dirty.
+  * It can be called by a thread running in any cluster.
   * - it takes the queuelock protecting the PPM dirty_list.
   * - it tests the PG_DIRTY flag in the page descriptor.
...
   * - it releases the queuelock protecting the PPM dirty_list.
   *****************************************************************************************
-  * @ page     : pointer on page descriptor.
+  * @ page_xp  : extended pointer on page descriptor.
   * @ returns true if page was not dirty / returns false if page was dirty
   ****************************************************************************************/
-  bool_t ppm_page_do_dirty( page_t * page );
-
-  /*****************************************************************************************
-  * This function unregisters a physical page as dirty.
+  bool_t ppm_page_do_dirty( xptr_t page_xp );
+
+  /*****************************************************************************************
+  * This function unregisters a page identified by the <page_xp> argument as dirty.
+  * It can be called by a thread running in any cluster.
   * - it takes the queuelock protecting the PPM dirty_list.
   * - it tests the PG_DIRTY flag in the page descriptor.
...
   * - it releases the queuelock protecting the PPM dirty_list.
   *****************************************************************************************
-  * @ page     : pointer on page descriptor.
+  * @ page_xp  : extended pointer on page descriptor.
   * @ returns true if page was dirty / returns false if page was not dirty
   ****************************************************************************************/
-  bool_t ppm_page_undo_dirty( page_t * page );
-
-  /*****************************************************************************************
-  * This function synchronizes (i.e. update the disk) all dirty pages in a cluster.
+  bool_t ppm_page_undo_dirty( xptr_t page_xp );
+
+  /*****************************************************************************************
+  * This function synchronizes (i.e. updates the IOC device) all dirty pages in a cluster.
   * - it takes the queuelock protecting the PPM dirty_list.
   * - it scans the PPM dirty list, and for each page:
...
   * The PPM dirty_list is empty when the sync operation completes.
   ****************************************************************************************/
-  void ppm_sync_all_pages( void );
+  void ppm_sync_dirty_pages( void );

   #endif /* _PPM_H_ */
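Put together, the dirty-page API is used in a write-then-sync pattern. A
condensed sketch (the helper is hypothetical; the page_xp value is assumed to
come from mapper_remote_get_page(), and the page is assumed local so that the
local flush reaches it):

    // Sketch: register a page as dirty, then flush the local cluster.
    static void demo_dirty_cycle( xptr_t page_xp )
    {
        // link the page in the dirty list of its owner cluster
        // (returns false if the page was already dirty)
        ppm_page_do_dirty( page_xp );

        // flush every dirty page of the local cluster to the IOC device,
        // e.g. when implementing a sync() syscall; the local dirty list
        // is empty when this returns
        ppm_sync_dirty_pages();
    }

Note that ppm_page_do_dirty() registers a remote page in the dirty list of the
page's owner cluster, so ppm_sync_dirty_pages() must be executed in each
cluster to flush everything.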
trunk/kernel/mm/vmm.c (r595 -> r606)

       error_t    error;
       xptr_t     page_xp;   // extended pointer on physical page descriptor
-      page_t   * page_ptr;  // local pointer on physical page descriptor
-      uint32_t   index;     // missing page index in vseg mapper
+      uint32_t   page_id;   // missing page index in vseg mapper
       uint32_t   type;      // vseg type;

-      type  = vseg->type;
-      index = vpn - vseg->vpn_base;
+      type    = vseg->type;
+      page_id = vpn - vseg->vpn_base;

   #if DEBUG_VMM_GET_ONE_PPN
...
   thread_t * this = CURRENT_THREAD;
   if( DEBUG_VMM_GET_ONE_PPN < cycle )
-  printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / index %d / cycle %d\n",
-  __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), index, cycle );
+  printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n",
+  __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
   #endif
...
       "mapper not defined for a FILE vseg\n" );

-      // get mapper cluster and local pointer
-      cxy_t      mapper_cxy = GET_CXY( mapper_xp );
-      mapper_t * mapper_ptr = GET_PTR( mapper_xp );
-
-      // get page descriptor from mapper
-      if( mapper_cxy == local_cxy )   // mapper is local
-      {
-          page_ptr = mapper_get_page( mapper_ptr , index );
-      }
-      else                            // mapper is remote
-      {
-          rpc_mapper_get_page_client( mapper_cxy , mapper_ptr , index , &page_ptr );
-      }
-
-      if ( page_ptr == NULL ) return EINVAL;
-
-      page_xp = XPTR( mapper_cxy , page_ptr );
+      // get extended pointer on page descriptor
+      page_xp = mapper_remote_get_page( mapper_xp , page_id );
+
+      if ( page_xp == XPTR_NULL ) return EINVAL;
...
       "mapper not defined for a CODE or DATA vseg\n" );

-      // get mapper cluster and local pointer
-      cxy_t      mapper_cxy = GET_CXY( mapper_xp );
-      mapper_t * mapper_ptr = GET_PTR( mapper_xp );
-
       // compute missing page offset in vseg
-      uint32_t offset = index << CONFIG_PPM_PAGE_SHIFT;
+      uint32_t offset = page_id << CONFIG_PPM_PAGE_SHIFT;

       // compute missing page offset in .elf file
...
   __FUNCTION__, this->process->pid, this->trdid, vpn );
   #endif
-      if( mapper_cxy == local_cxy )
-      {
-          error = mapper_move_kernel( mapper_ptr,
-                                      true,          // to_buffer
-                                      elf_offset,
-                                      base_xp,
-                                      CONFIG_PPM_PAGE_SIZE );
-      }
-      else
-      {
-          rpc_mapper_move_buffer_client( mapper_cxy,
-                                         mapper_ptr,
-                                         true,       // to buffer
-                                         false,      // kernel buffer
-                                         elf_offset,
-                                         base_xp,
-                                         CONFIG_PPM_PAGE_SIZE,
-                                         &error );
-      }
+      error = mapper_move_kernel( mapper_xp,
+                                  true,              // to_buffer
+                                  elf_offset,
+                                  base_xp,
+                                  CONFIG_PPM_PAGE_SIZE );
       if( error ) return EINVAL;
...
   #endif
       // initialize mapper part
-      if( mapper_cxy == local_cxy )
-      {
-          error = mapper_move_kernel( mapper_ptr,
-                                      true,          // to buffer
-                                      elf_offset,
-                                      base_xp,
-                                      file_size - offset );
-      }
-      else
-      {
-          rpc_mapper_move_buffer_client( mapper_cxy,
-                                         mapper_ptr,
-                                         true,       // to buffer
-                                         false,      // kernel buffer
-                                         elf_offset,
-                                         base_xp,
-                                         file_size - offset,
-                                         &error );
-      }
+      error = mapper_move_kernel( mapper_xp,
+                                  true,              // to buffer
+                                  elf_offset,
+                                  base_xp,
+                                  file_size - offset );
       if( error ) return EINVAL;
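After this changeset the FILE-vseg branch of vmm_get_one_ppn() reduces to a
single call, whether the mapper is local or remote. The equivalent stand-alone
logic, as a sketch with hypothetical helper and argument names (ppn_t,
ppm_page2ppn() and EINVAL are used as in the diff):

    // Sketch: get the PPN backing page <vpn> of a FILE vseg.
    static error_t demo_get_file_ppn( xptr_t    mapper_xp,  // from the vseg
                                      uint32_t  vpn,        // faulting virtual page
                                      uint32_t  vpn_base,   // vseg->vpn_base
                                      ppn_t   * ppn )       // [out] physical page number
    {
        // the page index in the mapper is the page index in the file
        uint32_t page_id = vpn - vpn_base;

        // local or remote, hit or miss: one call handles all cases now
        xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );

        if( page_xp == XPTR_NULL ) return EINVAL;

        *ppn = ppm_page2ppn( page_xp );

        return 0;
    }

The same simplification removes the explicit rpc_mapper_move_buffer_client()
calls in the CODE/DATA branch, since mapper_move_kernel() now takes an extended
pointer and can be called from any cluster.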