Changeset 657 for trunk/kernel/mm

Timestamp: Mar 18, 2020, 11:16:59 PM (5 years ago)
Location:  trunk/kernel/mm
Files:     10 edited
Legend: in the diffs below, lines prefixed with "-" were removed in r657, lines
prefixed with "+" were added, unprefixed lines are unmodified context, and "…"
marks an elided run of unchanged lines.
trunk/kernel/mm/kcm.c (r656 → r657)

  //////////////////////////////////////////////////////////////////////////////////////
  // This static function must be called by a local thread.
- // It returns a pointer on a block allocated from a non-full kcm_page.
- // It makes a panic if no block is available in selected page.
+ // It returns a pointer on a block allocated from an active kcm_page.
+ // It makes a panic if no block is available in the selected page.
  // It changes the page status as required.
  //////////////////////////////////////////////////////////////////////////////////////
  // @ kcm      : pointer on KCM allocator.
- // @ kcm_page : pointer on a non-full kcm_page.
+ // @ kcm_page : pointer on an active kcm_page.
  // @ return pointer on allocated block.
  /////////////////////////////////////////////////////////////////////////////////////
…
      uint32_t index = 1;
      uint64_t mask  = (uint64_t)0x2;
-     uint32_t found = 0;

      // allocate first free block in kcm_page, update status,
      while( index <= max )
      {
-         if( (status & mask) == 0 )   // block non allocated
+         if( (status & mask) == 0 )   // block found
          {
+             // update page count and status
              kcm_page->status = status | mask;
              kcm_page->count  = count + 1;
-             found = 1;
-
              break;
          }
…
      }

-     // change the page list if almost full
+     // change the page list if found block is the last
      if( count == max-1 )
      {
…
  /////////////////////////////////////////////////////////////////////////////////////
- // This private static function must be called by a local thread.
- // It returns one non-full kcm_page with te following policy :
+ // This static function must be called by a local thread.
+ // It returns one non-full kcm_page with the following policy :
  // - if the "active_list" is non empty, it returns the first "active" page,
  //   without modifying the KCM state.
- // - if the "active_list" is empty, it allocates a new page from mPPM, inserts
+ // - if the "active_list" is empty, it allocates a new page from PPM, inserts
  //   this page in the active_list, and returns it.
  /////////////////////////////////////////////////////////////////////////////////////
… in kcm_destroy() :
      // release KCM lock
      remote_busylock_release( lock_xp );
- }
+
+ }  // end kcm_destroy()
… in kcm_alloc() :
      void * block_ptr;

      // min block size is 64 bytes
      if( order < 6 ) order = 6;
…
      kcm_page = kcm_get_page( kcm_ptr );

+ #if DEBUG_KCM
+ thread_t * this  = CURRENT_THREAD;
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
+ {
+     printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n",
+     __FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr,
+     GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
+     kcm_remote_display( local_cxy , kcm_ptr );
+ }
+ #endif

      if( kcm_page == NULL )
…
  #if DEBUG_KCM
- thread_t * this  = CURRENT_THREAD;
- uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_KCM < cycle )
- printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n",
- __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr,
- GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
+ if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
+ {
+     printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n",
+     __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr,
+     GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) );
+     kcm_remote_display( local_cxy , kcm_ptr );
+ }
  #endif
… in kcm_free() :
      thread_t * this  = CURRENT_THREAD;
      uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_KCM < cycle )
- printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n",
- __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr,
- GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count );
+ if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
+ {
+     printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n",
+     __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr,
+     GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
+     kcm_remote_display( local_cxy , kcm_ptr );
+ }
  #endif
…
      // release lock
      remote_busylock_release( lock_xp );
+
+ #if DEBUG_KCM
+ if( (DEBUG_KCM < cycle) && (local_cxy == 1) )
+ {
+     printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n",
+     __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr,
+     GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );
+     kcm_remote_display( local_cxy , kcm_ptr );
  }
+ #endif
+
+ } // end kcm_free()
…
  /////////////////////////////////////////////////////////////////////////////////////
  // This static function can be called by any thread running in any cluster.
- // It returns a local pointer on a block allocated from an non-full kcm_page.
- // It makes a panic if no block available in selected page.
+ // It returns a local pointer on a block allocated from an active kcm_page.
+ // It makes a panic if no block available in the selected kcm_page.
  // It changes the page status as required.
  /////////////////////////////////////////////////////////////////////////////////////
- // @ kcm_cxy  : remote KCM cluster identidfier.
+ // @ kcm_cxy  : remote KCM cluster identifier.
  // @ kcm_ptr  : local pointer on remote KCM allocator.
- // @ kcm_page : pointer on active kcm page to use.
+ // @ kcm_page : local pointer on remote active kcm_page to use.
  // @ return a local pointer on the allocated block.
  /////////////////////////////////////////////////////////////////////////////////////
…
      uint32_t index = 1;
      uint64_t mask  = (uint64_t)0x2;
-     uint32_t found = 0;

      // allocate first free block in kcm_page, update status,
      while( index <= max )
      {
-         if( (status & mask) == 0 )   // block non allocated
+         if( (status & mask) == 0 )   // block found
          {
              hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask );
-             hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count )  , count + 1 );
-             found = 1;
+             hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count )  , count + 1 );
              break;
          }
…
      }

-     // change the page list if almost full
+     // change the page list if found block is the last
      if( count == max-1 )
      {
… in kcm_remote_display() :
                          kcm_t * kcm_ptr )
  {
+     list_entry_t * iter;
+     kcm_page_t   * kcm_page;
+     uint64_t       status;
+     uint32_t       count;
+
      uint32_t order           = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );
      uint32_t full_pages_nr   = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) );
      uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) );

-     printk("*** KCM / cxy %x / order %d / full_pages %d / empty_pages %d / active_pages %d\n",
+     printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",
      kcm_cxy, order, full_pages_nr, active_pages_nr );
- }
+
+     if( active_pages_nr )
+     {
+         LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->active_root , iter )
+         {
+             kcm_page = LIST_ELEMENT( iter , kcm_page_t , list );
+             status   = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
+             count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
+
+             printk("- active page %x / status (%x,%x) / count %d\n",
+             kcm_page, GET_CXY( status ), GET_PTR( status ), count );
+         }
+     }
+
+     if( full_pages_nr )
+     {
+         LIST_REMOTE_FOREACH( kcm_cxy , &kcm_ptr->full_root , iter )
+         {
+             kcm_page = LIST_ELEMENT( iter , kcm_page_t , list );
+             status   = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) );
+             count    = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) );
+
+             printk("- full page %x / status (%x,%x) / count %d\n",
+             kcm_page, GET_CXY( status ), GET_PTR( status ), count );
+         }
+     }
+ } // end kcm_remote_display()
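For readers unfamiliar with the KCM page layout touched above: each kcm_page carries a
64-bit "status" bitmap (bit i set when block i is allocated) and a "count" of allocated
blocks. The following standalone C sketch models the first-fit scan performed by both
allocation paths in this changeset. The function name is invented, the reason why index 0
is skipped (the first slot presumably holding the kcm_page descriptor itself) is an
assumption not stated in the diff, and max is assumed to be below 64.

    #include <stdint.h>

    // Standalone model of the allocation scan shown in the diffs above.
    // Returns the index of the first free block in "status", or -1 when the
    // page is unexpectedly full (the kernel version panics in that case).
    static int32_t kcm_first_free_block( uint64_t status,   // allocation bitmap
                                         uint32_t max )     // blocks per page
    {
        uint32_t index = 1;                  // block 0 is never handed out
        uint64_t mask  = (uint64_t)0x2;      // bit of block 1

        while( index <= max )
        {
            if( (status & mask) == 0 ) return (int32_t)index;   // block found
            index++;
            mask <<= 1;
        }
        return -1;
    }

When the returned block is the last free one (count == max-1), the page migrates from the
active_list to the full_list, which is exactly the condition whose comment this changeset
renames from "almost full" to "found block is the last".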
trunk/kernel/mm/kcm.h (r635 → r657)

  * It initializes a Kernel Cache Manager, depending on block size.
  ****************************************************************************************
- * @ kcm   : pointer on KCM manager to initialize.
+ * @ kcm   : pointer on KCM to be initialized.
  * @ order : ln(block_size).
  ***************************************************************************************/
…
  ***************************************************************************************/
  void kcm_free( void * block_ptr );
-
-
-

  /****************************************************************************************
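A hedged usage sketch of this interface, routed through the kmem front-end the same way
the mapper.c changes in this changeset do. The struct my_object_t is invented for the
example; the request fields and flags are those visible in the mapper.c diff below.

    // allocate a kernel object from the KCM cache serving 2^order byte blocks;
    // kcm.c rounds any order below 6 up to 64-byte blocks
    kmem_req_t req;
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(my_object_t) );
    req.flags = AF_KERNEL | AF_ZERO;
    my_object_t * obj = kmem_alloc( &req );      // NULL if no memory

    /* ... use obj ... */

    req.type = KMEM_KCM;                         // kcm_free() needs only the
    req.ptr  = obj;                              // block pointer: the KCM is
    kmem_free( &req );                           // recovered from the page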
trunk/kernel/mm/kmem.c (r656 → r657)

  * kmem.c - kernel memory allocator implementation.
  *
- * Authors  Alain Greiner (2016,2017,2018,2019)
+ * Authors  Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/mm/mapper.c (r656 → r657)

  * Authors   Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018,2019)
+ *           Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
- //////////////////////////////////////////////
- mapper_t * mapper_create( vfs_fs_type_t type )
- {
-     mapper_t * mapper;
+ /////////////////////////////////////
+ xptr_t mapper_create( cxy_t    cxy,
+                       uint32_t type )
+ {
+     mapper_t * mapper_ptr;
      kmem_req_t req;
      error_t    error;

      // allocate memory for mapper descriptor
      req.type   = KMEM_KCM;
      req.order  = bits_log2( sizeof(mapper_t) );
      req.flags  = AF_KERNEL | AF_ZERO;
-     mapper     = kmem_alloc( &req );
-
-     if( mapper == NULL )
+     mapper_ptr = kmem_remote_alloc( cxy , &req );
+
+     if( mapper_ptr == NULL )
      {
          printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ );
-         return NULL;
-     }
-
-     // initialize refcount & inode
-     mapper->refcount = 0;
-     mapper->inode    = NULL;
+         return XPTR_NULL;
+     }
+
+     // initialize refcount and type
+     hal_remote_s32( XPTR( cxy , &mapper_ptr->refcount ) , 0 );
+     hal_remote_s32( XPTR( cxy , &mapper_ptr->fs_type )  , type );

      // initialize radix tree
-     error = grdxt_init( &mapper->rt,
-                         CONFIG_MAPPER_GRDXT_W1,
-                         CONFIG_MAPPER_GRDXT_W2,
-                         CONFIG_MAPPER_GRDXT_W3 );
+     error = grdxt_remote_init( XPTR( cxy , &mapper_ptr->rt ),
+                                CONFIG_MAPPER_GRDXT_W1,
+                                CONFIG_MAPPER_GRDXT_W2,
+                                CONFIG_MAPPER_GRDXT_W3 );
      if( error )
      {
          printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ );
          req.type = KMEM_KCM;
-         req.ptr  = mapper;
-         kmem_free( &req );
-         return NULL;
-     }
-
-     // initialize mapper type
-     mapper->type = type;
+         req.ptr  = mapper_ptr;
+         kmem_remote_free( cxy , &req );
+         return XPTR_NULL;
+     }

      // initialize mapper lock
-     remote_rwlock_init( XPTR( local_cxy , &mapper->lock ) , LOCK_MAPPER_STATE );
+     remote_rwlock_init( XPTR( cxy , &mapper_ptr->lock ) , LOCK_MAPPER_STATE );

      // initialize waiting threads xlist (empty)
-     xlist_root_init( XPTR( local_cxy , &mapper->wait_root ) );
+     xlist_root_init( XPTR( cxy , &mapper_ptr->wait_root ) );

      // initialize vsegs xlist (empty)
-     xlist_root_init( XPTR( local_cxy , &mapper->vsegs_root ) );
-
-     return mapper;
+     xlist_root_init( XPTR( cxy , &mapper_ptr->vsegs_root ) );
+
+     return XPTR( cxy , mapper_ptr );

  } // end mapper_create()

- ////////////////////////////////////////
- void mapper_destroy( mapper_t * mapper )
- {
+ //////////////////////////////////////
+ void mapper_destroy( xptr_t mapper_xp )
+ {
+     xptr_t     page_xp;
      page_t   * page;
      uint32_t   found_index = 0;
      uint32_t   start_index = 0;
      kmem_req_t req;

+     cxy_t      mapper_cxy = GET_CXY( mapper_xp );
+     mapper_t * mapper_ptr = GET_PTR( mapper_xp );
+
+     // build extended pointer on radix tree
+     xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );
+
      // scan radix tree
      do
      {
          // get page from radix tree
-         page = (page_t *)grdxt_get_first( &mapper->rt , start_index , &found_index );
-
+         page_xp = grdxt_remote_get_first( rt_xp,
+                                           start_index,
+                                           &found_index );
+         page = GET_PTR( page_xp );
+
          // release registered pages to PPM
          if( page != NULL )
          {
              // remove page from mapper and release to PPM
-             mapper_remote_release_page( XPTR( local_cxy , mapper ) , page );
+             mapper_remote_release_page( mapper_xp , page );

              // update start_key value for next page
…
      // release the memory allocated to radix tree itself
-     grdxt_destroy( &mapper->rt );
+     grdxt_remote_destroy( rt_xp );

      // release memory for mapper descriptor
      req.type = KMEM_KCM;
-     req.ptr  = mapper;
-     kmem_free( &req );
+     req.ptr  = mapper_ptr;
+     kmem_remote_free( mapper_cxy , &req );

  } // end mapper_destroy()

- ////////////////////////////////////////////////////////
- error_t mapper_remote_handle_miss( xptr_t     mapper_xp,
-                                    uint32_t   page_id,
-                                    xptr_t   * page_xp_ptr )
+ /////////////////////////////////////////////////
+ error_t mapper_handle_miss( xptr_t     mapper_xp,
+                             uint32_t   page_id,
+                             xptr_t   * page_xp_ptr )
  {
      error_t error;

-     uint32_t inode_size;
-     uint32_t inode_type;
+     uint32_t inode_size = 0;
+     uint32_t inode_type = 0;

      thread_t * this = CURRENT_THREAD;
…
          inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
          inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
      }
-     else
-     {
-         inode_size = 0;
-         inode_type = 0;
-     }
…
      return 0;

- } // end mapper_remote_handle_miss()
+ } // end mapper_handle_miss()

- //////////////////////////////////////////////////
- xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
-                                uint32_t  page_id )
+ /////////////////////////////////////////////
+ xptr_t mapper_get_page( xptr_t    mapper_xp,
+                         uint32_t  page_id )
  {
      error_t error;
…
      cxy_t mapper_cxy = GET_CXY( mapper_xp );

+     assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) != NULL ),
+     "should not be used for the FAT mapper");
+
  #if DEBUG_MAPPER_GET_PAGE
-     vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
      uint32_t cycle = (uint32_t)hal_get_cycles();
      char     name[CONFIG_VFS_MAX_NAME_LENGTH];
-     if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )   // FAT mapper
-     {
-         printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
-         __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
-     }
-     if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )   // file mapper
-     {
+     if( DEBUG_MAPPER_GET_PAGE < cycle )
+     {
+         vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
          vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
          printk("\n[%s] thread[%x,%x] enter for page %d of <%s> mapper / cycle %d\n",
…
      if ( page_xp == XPTR_NULL )   // miss confirmed => handle it
      {
-         error = mapper_remote_handle_miss( mapper_xp,
-                                            page_id,
-                                            &page_xp );
+         error = mapper_handle_miss( mapper_xp,
+                                     page_id,
+                                     &page_xp );
          if( error )
          {
…
  #if (DEBUG_MAPPER_GET_PAGE & 1)
-     if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
-     {
-         printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n",
-         __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) );
-     }
-     if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
-     {
-         printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n",
-         __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
-     }
+     if( DEBUG_MAPPER_GET_PAGE < cycle )
+     printk("\n[%s] thread[%x,%x] introduced missing page %d in <%s> mapper / ppn %x\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
  #endif
…
  #if DEBUG_MAPPER_GET_PAGE
-     cycle = (uint32_t)hal_get_cycles();
-     if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
-     {
-         printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
-         __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
-     }
-     if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
-     {
-         printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x\n",
-         __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
-     }
+     if( DEBUG_MAPPER_GET_PAGE < cycle )
+     printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
  #endif
…
      return page_xp;

- } // end mapper_remote_get_page()
+ } // end mapper_get_page()
+
+ /////////////////////////////////////////////////
+ xptr_t mapper_get_fat_page( xptr_t    mapper_xp,
+                             uint32_t  page_id )
+ {
+     error_t error;
+
+     thread_t * this = CURRENT_THREAD;
+
+     // get mapper cluster and local pointer
+     mapper_t * mapper_ptr = GET_PTR( mapper_xp );
+     cxy_t      mapper_cxy = GET_CXY( mapper_xp );
+
+     assert( (hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ) == NULL ),
+     "should be used for the FAT mapper");
+
+ #if DEBUG_MAPPER_GET_FAT_PAGE
+     uint32_t cycle = (uint32_t)hal_get_cycles();
+     if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
+     printk("\n[%s] thread[%x,%x] enter for page %d of FAT mapper / cycle %d\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
+ #endif
+
+ #if( DEBUG_MAPPER_GET_FAT_PAGE & 2 )
+     if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) ppm_remote_display( local_cxy );
+ #endif
+
+     // check thread can yield
+     thread_assert_can_yield( this , __FUNCTION__ );
+
+     // build extended pointer on mapper lock and mapper rt
+     xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );
+     xptr_t rt_xp   = XPTR( mapper_cxy , &mapper_ptr->rt );
+
+     // take mapper lock in READ_MODE
+     remote_rwlock_rd_acquire( lock_xp );
+
+     // search page in radix tree
+     xptr_t page_xp = grdxt_remote_lookup( rt_xp , page_id );
+
+     // test mapper miss
+     if( page_xp == XPTR_NULL )   // miss => handle it
+     {
+         // release the lock in READ_MODE and take it in WRITE_MODE
+         remote_rwlock_rd_release( lock_xp );
+         remote_rwlock_wr_acquire( lock_xp );
+
+         // second test on missing page because the page status can be modified
+         // by another thread, when passing from READ_MODE to WRITE_MODE.
+         // from this point there is no concurrent accesses to mapper.
+         page_xp = grdxt_remote_lookup( rt_xp , page_id );
+
+         if ( page_xp == XPTR_NULL )   // miss confirmed => handle it
+         {
+             error = mapper_handle_miss( mapper_xp,
+                                         page_id,
+                                         &page_xp );
+             if( error )
+             {
+                 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n",
+                 __FUNCTION__ , this->process->pid, this->trdid );
+                 remote_rwlock_wr_release( lock_xp );
+                 return XPTR_NULL;
+             }
+         }
+
+ #if (DEBUG_MAPPER_GET_FAT_PAGE & 1)
+         if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
+         printk("\n[%s] thread[%x,%x] introduced missing page %d in FAT mapper / ppn %x\n",
+         __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
+ #endif
+
+         // release mapper lock from WRITE_MODE
+         remote_rwlock_wr_release( lock_xp );
+     }
+     else   // hit
+     {
+         // release mapper lock from READ_MODE
+         remote_rwlock_rd_release( lock_xp );
+     }
+
+ #if DEBUG_MAPPER_GET_FAT_PAGE
+     if( DEBUG_MAPPER_GET_FAT_PAGE < cycle )
+     printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x\n",
+     __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
+ #endif
+
+ #if( DEBUG_MAPPER_GET_FAT_PAGE & 2)
+     if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) ppm_remote_display( local_cxy );
+ #endif
+
+     return page_xp;
+
+ } // end mapper_get_fat_page()
… in mapper_move_user() :
      // get extended pointer on page descriptor in mapper
-     page_xp = mapper_remote_get_page( mapper_xp , page_id );
+     page_xp = mapper_get_page( mapper_xp , page_id );

      if ( page_xp == XPTR_NULL ) return -1;
…
      __FUNCTION__, this->process->pid, this->trdid, page_bytes,
      local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
-     mapper_display_page( mapper_xp , page_xp , 128 );
+     mapper_display_page( mapper_xp , page_id , 128 );
… in mapper_move_kernel() :
      // get extended pointer on page descriptor
-     page_xp = mapper_remote_get_page( mapper_xp , page_id );
+     page_xp = mapper_get_page( mapper_xp , page_id );

      if ( page_xp == XPTR_NULL ) return -1;
… in mapper_remote_get_32() :
      // get page containing the searched word
-     page_xp = mapper_remote_get_page( mapper_xp , page_id );
+     page_xp = mapper_get_page( mapper_xp , page_id );

      if( page_xp == XPTR_NULL ) return -1;
… in mapper_remote_set_32() :
      // get page containing the searched word
-     page_xp = mapper_remote_get_page( mapper_xp , page_id );
+     page_xp = mapper_get_page( mapper_xp , page_id );

      if( page_xp == XPTR_NULL ) return -1;
…
  } // end mapper_remote_set_32()

- /////////////////////////////////////////
- error_t mapper_sync( mapper_t * mapper )
- {
-     page_t   * page;        // local pointer on current page descriptor
-     xptr_t     page_xp;     // extended pointer on current page descriptor
-     grdxt_t  * rt;          // pointer on radix_tree descriptor
-     uint32_t   start_key;   // start page index in mapper
-     uint32_t   found_key;   // current page index in mapper
+ ////////////////////////////////////////
+ error_t mapper_sync( xptr_t mapper_xp )
+ {
+     uint32_t   found_key;   // unused, required by grdxt_remote_get_first()
      error_t    error;
+
+     // get mapper cluster and local pointer
+     mapper_t * mapper_ptr = GET_PTR( mapper_xp );
+     cxy_t      mapper_cxy = GET_CXY( mapper_xp );

  #if DEBUG_MAPPER_SYNC
      thread_t * this  = CURRENT_THREAD;
      uint32_t   cycle = (uint32_t)hal_get_cycles();
      char       name[CONFIG_VFS_MAX_NAME_LENGTH];
-     vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
+     vfs_inode_get_name( XPTR( mapper_cxy , &mapper_ptr->inode ) , name );
  #endif

-     // get pointer on radix tree
-     rt = &mapper->rt;
+     // build extended pointer on radix tree
+     xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );

      // initialise loop variable
-     start_key = 0;
+     uint32_t start_key = 0;

      // scan radix-tree until last page found
      while( 1 )
      {
          // get page descriptor from radix tree
-         page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
-
-         if( page == NULL ) break;
-
-         assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key );
-         assert( (page->order == 0), "page_order (%d] != 0", page->order );
-
-         // build extended pointer on page descriptor
-         page_xp = XPTR( local_cxy , page );
+         xptr_t page_xp = grdxt_remote_get_first( rt_xp , start_key , &found_key );
+
+         page_t * page_ptr = GET_PTR( page_xp );
+
+         // exit loop when last page found
+         if( page_ptr == NULL ) break;
+
+         // get page flags & index fields
+         uint32_t flags = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->flags ) );
+         uint32_t index = hal_remote_l32( XPTR( mapper_cxy , &page_ptr->index ) );

          // synchronize page if dirty
-         if( (page->flags & PG_DIRTY) != 0 )
+         if( flags & PG_DIRTY )
          {
  #if DEBUG_MAPPER_SYNC
          if( cycle > DEBUG_MAPPER_SYNC )
          printk("\n[%s] thread[%x,%x] synchonise page %d of <%s> to IOC device\n",
-         __FUNCTION__, this->process->pid, this->trdid, page->index, name );
+         __FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
  #endif
              // copy page to file system
…
              {
                  printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n",
-                 __FUNCTION__, page->index );
+                 __FUNCTION__, page_ptr->index );
                  return -1;
              }
…
  #if DEBUG_MAPPER_SYNC
          if( cycle > DEBUG_MAPPER_SYNC )
          printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
-         __FUNCTION__, this->process->pid, this->trdid, page->index, name );
+         __FUNCTION__, this->process->pid, this->trdid, page_ptr->index, name );
  #endif
          }

          // update loop variable
-         start_key = page->index + 1;
+         start_key = index + 1;
      }  // end while
…
  ///////////////////////////////////////////////
  void mapper_display_page( xptr_t     mapper_xp,
-                           xptr_t     page_xp,
+                           uint32_t   page_id,
                            uint32_t   nbytes )
  {
…
      assert( (nbytes <= 4096) , "nbytes cannot be larger than 4096");
      assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
-     assert( (page_xp != XPTR_NULL) , "page_xp argument cannot be null");

      // get mapper cluster and local pointer
…
      mapper_t * mapper_ptr = GET_PTR( mapper_xp );

-     // get page cluster an local pointer
+     // get extended pointer on page descriptor
+     xptr_t page_xp = mapper_get_page( mapper_xp , page_id );
+
+     // get page cluster and local pointer
      cxy_t    page_cxy = GET_CXY( page_xp );
      page_t * page_ptr = GET_PTR( page_xp );

      // get page_id and mapper from page descriptor
-     uint32_t   page_id = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
+     uint32_t   index   = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
      mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );

      assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
-     assert( (mapper_ptr == mapper )   , "unconsistent mapper_xp & page_xp arguments");
+     assert( (mapper_ptr == mapper )   , "unconsistent mapper field in page descriptor");
+     assert( (page_id    == index  )   , "unconsistent index field in page descriptor");

      // get inode
trunk/kernel/mm/mapper.h (r656 → r657)

  * - In the present implementation the cache size for a given file increases on demand,
  *   and the allocated memory is only released when the mapper/inode is destroyed.
- *
- * TODO the "type" field in mapper descriptor is redundant and probably unused.
  ******************************************************************************************/
…
  {
      struct vfs_inode_s * inode;     /*! owner inode                                      */
-     uint32_t             type;      /*! file system type                                 */
+     uint32_t             fs_type;   /*! file system type                                 */
      grdxt_t              rt;        /*! embedded pages cache descriptor (radix tree)     */
      remote_rwlock_t      lock;      /*! several readers / only one writer                */
…
  /*******************************************************************************************
- * This function allocates physical memory for a mapper descriptor, and initializes it
- * (refcount <= 0) / inode <= NULL).
- * It must be executed by a thread running in the cluster containing the mapper.
- *******************************************************************************************
- * @ type   : type of the mapper to create.
- * @ return : pointer on created mapper if success / return NULL if no memory
- ******************************************************************************************/
- mapper_t * mapper_create( vfs_fs_type_t type );
+ * This function allocates physical memory for a mapper descriptor, in cluster
+ * identified by the <cxy> argument. It initializes it (refcount <= 0) / inode <= NULL).
+ * It can be executed by any thread running in any cluster.
+ *******************************************************************************************
+ * @ cxy    : target cluster identifier.
+ * @ type   : FS type.
+ * @ return an extended pointer on created mapper if success / return NULL if no memory
+ ******************************************************************************************/
+ xptr_t mapper_create( cxy_t    cxy,
+                       uint32_t type );

  /*******************************************************************************************
- * This function releases all physical memory allocated for a mapper.
- * Both the mapper descriptor and the radix tree are released.
+ * This function releases all physical memory allocated for a mapper, identified
+ * by the <mapper_xp> argument. Both the mapper descriptor and the radix tree are released.
  * It does NOT synchronize dirty pages. Use the vfs_sync_inode() function if required.
- * It must be executed by a thread running in the cluster containing the mapper.
- *******************************************************************************************
- * @ mapper : target mapper.
- ******************************************************************************************/
- void mapper_destroy( mapper_t * mapper );
+ * It can be executed by any thread running in any cluster.
+ *******************************************************************************************
+ * @ mapper_xp : extended pointer on target mapper.
+ ******************************************************************************************/
+ void mapper_destroy( xptr_t mapper_xp );
…
  * @ return 0 if success / return -1 if IOC cannot be accessed.
  ******************************************************************************************/
- error_t mapper_remote_handle_miss( xptr_t     mapper_xp,
-                                    uint32_t   page_id,
-                                    xptr_t   * page_xp );
+ error_t mapper_handle_miss( xptr_t     mapper_xp,
+                             uint32_t   page_id,
+                             xptr_t   * page_xp );
…
  /*******************************************************************************************
- * This function returns an extended pointer on a page descriptor.
- * The - possibly remote - mapper is identified by the <mapper_xp> argument.
+ * This function returns an extended pointer on a page descriptor for a regular mapper
+ * (i.e. this mapper is NOT the FAT mapper), identified by the <mapper_xp> argument.
  * The page is identified by <page_id> argument (page index in the file).
  * It can be executed by a thread running in any cluster, as it uses remote
…
  * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error.
  ******************************************************************************************/
- xptr_t mapper_remote_get_page( xptr_t    mapper_xp,
-                                uint32_t  page_id );
+ xptr_t mapper_get_page( xptr_t    mapper_xp,
+                         uint32_t  page_id );
+
+ /*******************************************************************************************
+ * This function returns an extended pointer on a page descriptor for the FAT mapper.
+ * The page is identified by <page_id> argument (page index in the FAT mapper).
+ * It can be executed by a thread running in any cluster, as it uses remote
+ * access primitives to scan the mapper.
+ * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the
+ * mapper_handle_miss() to load the missing page from device to mapper, using an RPC
+ * when the mapper is remote.
+ *******************************************************************************************
+ * @ mapper_xp  : extended pointer on the mapper.
+ * @ page_id    : page index in file
+ * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error.
+ ******************************************************************************************/
+ xptr_t mapper_get_fat_page( xptr_t    mapper_xp,
+                             uint32_t  page_id );
…
  /*******************************************************************************************
- * This function scan all pages present in the mapper identified by the <mapper> argument,
- * and synchronize all pages marked as "dirty" on disk.
+ * This function scan all pages present in the mapper identified by the <mapper_xp>
+ * argument, and synchronize all pages marked as "dirty" on disk.
  * These pages are unmarked and removed from the local PPM dirty_list.
- * This function must be called by a local thread running in same cluster as the mapper.
- * A remote thread must call the RPC_MAPPER_SYNC function.
- *******************************************************************************************
- * @ mapper : [in] local pointer on local mapper.
- * @ returns 0 if success / return -1 if error.
- ******************************************************************************************/
- error_t mapper_sync( mapper_t * mapper );
+ * It can be called by any thread running in any cluster.
+ *******************************************************************************************
+ * @ mapper_xp : [in] extended pointer on local mapper.
+ * @ returns 0 if success / return -1 if error.
+ ******************************************************************************************/
+ error_t mapper_sync( xptr_t mapper_xp );

  /*******************************************************************************************
  * This debug function displays the content of a given page of a given mapper, identified
- * by the <mapper_xp> and <page_xp> arguments.
+ * by the <mapper_xp> and <page_id> arguments.
  * The number of bytes to display in page is defined by the <nbytes> argument.
  * The format is eigth (32 bits) words per line in hexadecimal.
…
  * @ mapper_xp  : [in] extended pointer on the mapper.
- * @ page_xp    : [in] extended pointer on page descriptor.
+ * @ page_id    : [in] page_index in mapper.
  * @ nbytes     : [in] number of bytes in page.
  * @ returns 0 if success / return -1 if error.
  ******************************************************************************************/
  void mapper_display_page( xptr_t     mapper_xp,
-                           xptr_t     page_xp,
+                           uint32_t   page_id,
                            uint32_t   nbytes );
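Both mapper_get_page() and mapper_get_fat_page() resolve a miss with the same
"double-checked" lock upgrade, which is worth spelling out because the second lookup is
easy to overlook. This skeleton is lifted directly from the code in mapper.c above
(error handling elided for brevity):

    // take the lock in READ_MODE: hits proceed concurrently
    remote_rwlock_rd_acquire( lock_xp );
    xptr_t page_xp = grdxt_remote_lookup( rt_xp , page_id );

    if( page_xp == XPTR_NULL )                  // probable miss
    {
        // the rwlock offers no in-place upgrade, so release and re-acquire
        remote_rwlock_rd_release( lock_xp );
        remote_rwlock_wr_acquire( lock_xp );

        // re-check: another thread may have loaded the page while this
        // thread was waiting for the lock in WRITE_MODE
        page_xp = grdxt_remote_lookup( rt_xp , page_id );
        if( page_xp == XPTR_NULL )
            mapper_handle_miss( mapper_xp , page_id , &page_xp );

        remote_rwlock_wr_release( lock_xp );
    }
    else                                        // hit
    {
        remote_rwlock_rd_release( lock_xp );
    }

The second lookup is what makes the pattern safe: between the read release and the write
acquire, a concurrent thread may already have inserted the page into the radix tree.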
trunk/kernel/mm/ppm.c (r656 → r657)

  * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
- *           Alain Greiner (2016,2017,2018,2019)
+ *           Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/mm/vmm.c (r656 → r657)

  * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
- *           Alain Greiner (2016,2017,2018,2019)
+ *           Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
      xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );

-     // loop on PTEs in GPT to unmap PTE if (old d_vpn_min <= vpn < new_vpn_min)
+     // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
      for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
      {
…
      // get extended pointer on page descriptor
-     page_xp = mapper_remote_get_page( mapper_xp , page_id );
+     page_xp = mapper_get_page( mapper_xp , page_id );

      if ( page_xp == XPTR_NULL ) return EINVAL;
trunk/kernel/mm/vmm.h (r656 → r657)

  * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
- *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018,2019)
+ *           Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites
…
  #include <list.h>
  #include <queuelock.h>
+ #include <remote_queuelock.h>
  #include <hal_gpt.h>
  #include <vseg.h>
…
  /*********************************************************************************************
- * This function modifies the size of the vseg identified by <process> and <base> arguments
- * in all clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments.
- * This function is called by the sys_munmap() function, and can be called by a thread
- * running in any cluster, as it uses remote accesses.
+ * This function modifies the vseg identified by <process> and <base> arguments in all
+ * clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments.
+ * The new vseg, defined by the <new_base> and <new_size> arguments must be included
+ * in the existing vseg. The target VSL size and base fields are modified in the VSL.
+ * This is done in all clusters containing a VMM copy to maintain VMM coherence.
+ * It is called by the sys_munmap() and dev_fbf_resize_window() functions.
+ * It can be called by a thread running in any cluster, as it uses the vmm_resize_vseg() in
+ * the local cluster, and parallel RPC_VMM_RESIZE_VSEG for remote clusters.
  * It cannot fail, as only vseg registered in VSL copies are updated.
  *********************************************************************************************
…
  * the VSL and remove all associated PTE entries from the GPT.
  * This is done in all clusters containing a VMM copy to maintain VMM coherence.
- * This function can be called by a thread running in any cluster, as it uses the
- * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters.
+ * It is called by the sys_munmap() and dev_fbf_resize_window() functions.
+ * It can be called by a thread running in any cluster, as it uses the vmm_remove_vseg() in
+ * the local cluster, and parallel RPC_VMM_REMOVE_VSEG for remote clusters.
  * It cannot fail, as only vseg registered in VSL copies are deleted.
  *********************************************************************************************
…
  * It must be called by a local thread, running in the cluster containing the modified VMM.
  * Use the RPC_VMM_REMOVE_VSEG if required.
- * It makes a kernel panic if the process is not registered in the local cluster,
- * or if the vseg is not registered in the process VSL.
+ * It makes a kernel panic if the process is not registered in the local cluster.
  * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
  * unmapped from local GPT. Other actions depend on the vseg type:
…
  /*********************************************************************************************
  * This function resize a local vseg identified by the <process> and <vseg> arguments.
- * It is called by the vmm_global_resize() function.
- * It must be called by a local thread, running in the cluster containing the modified VMM.
+ * Both the "size" and "base" fields are modified in the process VSL. When the new vseg
+ * contains less pages than the target vseg, the relevant pages are removed from the GPT.
+ * It is called by the vmm_global_resize() and dev_fbf_resize_window() functions.
+ * It must be called by a local thread, running in the cluster containing the modified VSL.
  * Use the RPC_VMM_RESIZE_VSEG if required.
- * It makes a kernel panic if the process is not registered in the local cluster,
- * or if the vseg is not registered in the process VSL.
- * The new vseg, defined by the <new_base> and <new_size> arguments must be strictly
- * included in the target vseg. The target VSL size and base fields are modified in the VSL.
- * If the new vseg contains less pages than the target vseg, the relevant pages are
- * removed from the GPT.
  * The VSL lock protecting the VSL must be taken by the caller.
  *********************************************************************************************
…
                     ppn_t * ppn );

-
-
  #endif /* _VMM_H_ */
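The inclusion constraint on <new_base> and <new_size> is easiest to see with numbers. A
hedged sketch of how sys_munmap() might shrink a vseg follows; the global function name
is assumed to be vmm_global_resize_vseg() (the declaration itself is elided from the
diff above), and all addresses are illustrative:

    // existing vseg covers [base, base + size)
    intptr_t base = 0x40000000;
    intptr_t size = 0x8000;               // 8 pages of 4 Kbytes

    // munmap() of the first two pages keeps the upper six;
    // the new vseg must stay included in the old one
    intptr_t new_base = base + 0x2000;
    intptr_t new_size = size - 0x2000;

    // updates "base"/"size" in every VSL copy and unmaps the two dropped
    // pages from each GPT: vmm_resize_vseg() locally, and parallel
    // RPC_VMM_RESIZE_VSEG for remote clusters
    vmm_global_resize_vseg( process , base , new_base , new_size );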
trunk/kernel/mm/vseg.c (r651 → r657)

  * vseg.c - virtual segment (vseg) related operations
  *
- * Authors  Alain Greiner (2016,2017,2018,2019)
+ * Authors  Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites
trunk/kernel/mm/vseg.h (r651 → r657)

  * vseg.h - virtual segment (vseg) related operations
  *
- * Authors  Alain Greiner (2016,2017,2018,2019)
+ * Authors  Alain Greiner (2016,2017,2018,2019,2020)
  *
  * Copyright (c) UPMC Sorbonne Universites