Changeset 683 for trunk/kernel/mm
- Timestamp: Jan 13, 2021, 12:36:17 AM
- Location: trunk/kernel/mm
- Files: 13 edited
trunk/kernel/mm/kcm.c
r672 r683 36 36 #include <kcm.h> 37 37 38 /////////////////////////////////////////////////////////////////////////////////////////// 39 // global variables 40 /////////////////////////////////////////////////////////////////////////////////////////// 41 42 extern chdev_directory_t chdev_dir; // allocated in kernel_init.c 43 38 44 39 45 ///////////////////////////////////////////////////////////////////////////////////// … … 42 48 43 49 ////////////////////////////////////////////////////////////////////////////////////// 44 // This static function must be called by a local thread.50 // This static function is called by the kcm_alloc() function. 45 51 // It returns a pointer on a block allocated from an active kcm_page. 46 52 // It makes a panic if no block is available in the selected page. … … 55 61 { 56 62 // initialise variables 57 uint32_t size = 1 << kcm->order; 58 uint32_t max = kcm->max_blocks; 63 uint32_t order = kcm->order; 59 64 uint32_t count = kcm_page->count; 60 65 uint64_t status = kcm_page->status; 61 66 62 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" ); 67 // check kcm page not full 68 assert( __FUNCTION__, (count < 63) , 69 "kcm_page should not be full / cxy %x / order %d / count %d", local_cxy, order, count ); 63 70 64 71 uint32_t index = 1; … … 67 74 // allocate first free block in kcm_page, update status, 68 75 // and count , compute index of allocated block in kcm_page 69 while( index <= max)76 while( index <= 63 ) 70 77 { 71 78 if( (status & mask) == 0 ) // block found … … 81 88 } 82 89 83 // change the page list if found block is the last84 if( count == max-1)90 // switch page to full if last block 91 if( (count + 1) == 63 ) 85 92 { 86 93 list_unlink( &kcm_page->list); … … 92 99 93 100 // compute return pointer 94 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 95 96 #if DEBUG_KCM 97 thread_t * this = CURRENT_THREAD; 98 uint32_t cycle = (uint32_t)hal_get_cycles(); 99 if( DEBUG_KCM < cycle ) 100 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n", 101 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle ); 102 #endif 101 void * ptr = (void *)((intptr_t)kcm_page + (index << order)); 103 102 104 103 return ptr; … … 107 106 108 107 ///////////////////////////////////////////////////////////////////////////////////// 109 // This private static function must be called by a local thread.108 // This static function is called by the kcm_free() function. 110 109 // It releases a previously allocated block to the relevant kcm_page. 111 110 // It makes a panic if the released block is not allocated in this page. 
… … 121 120 { 122 121 // initialise variables 123 uint32_t max = kcm->max_blocks; 124 uint32_t size = 1 << kcm->order; 122 uint32_t order = kcm->order; 125 123 uint32_t count = kcm_page->count; 126 124 uint64_t status = kcm_page->status; 127 125 128 // compute block index from block pointer 129 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;126 // compute block index from block pointer and kcm_page pointer 127 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order; 130 128 131 129 // compute mask in bit vector … … 136 134 printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n", 137 135 __FUNCTION__, local_cxy, block_ptr, kcm, kcm_page ); 138 printk(" status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );139 136 kcm_remote_display( local_cxy , kcm ); 140 137 return; … … 145 142 kcm_page->count = count - 1; 146 143 147 // change the page mode if pagewas full148 if( count == max)144 // switch page to active if it was full 145 if( count == 63 ) 149 146 { 150 147 list_unlink( &kcm_page->list ); … … 155 152 } 156 153 157 #if DEBUG_KCM158 thread_t * this = CURRENT_THREAD;159 uint32_t cycle = (uint32_t)hal_get_cycles();160 if( DEBUG_KCM < cycle )161 printk("\n[%s] thread[%x,%x] block %x / page %x / size %d / count %d / cycle %d\n",162 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle );163 #endif164 165 154 } // kcm_put_block() 166 155 167 156 ///////////////////////////////////////////////////////////////////////////////////// 168 // This static function must be called by a local thread. 169 // It returns one non-full kcm_page with the following policy : 157 // This static function returns one non-full kcm_page with the following policy : 170 158 // - if the "active_list" is non empty, it returns the first "active" page, 171 159 // without modifying the KCM state. 
… … 188 176 else // allocate a new page from PPM 189 177 { 190 // get one 4 Kbytes page from local PPM 191 page_t * page = ppm_alloc_pages( 0 ); 178 // get KCM order 179 uint32_t order = kcm->order; 180 181 // get one kcm_page from PPM 182 page_t * page = ppm_alloc_pages( order + 6 - CONFIG_PPM_PAGE_ORDER ); 192 183 193 184 if( page == NULL ) 194 185 { 195 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 196 __FUNCTION__ , local_cxy ); 197 186 187 #if DEBUG_KCM_ERROR 188 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 189 __FUNCTION__ , local_cxy ); 190 #endif 198 191 return NULL; 199 192 } … … 202 195 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 203 196 204 // get local pointer on kcm_page 197 // get local pointer on kcm_page 205 198 kcm_page = GET_PTR( base_xp ); 206 199 … … 225 218 { 226 219 227 assert( __FUNCTION__, ((order > 5) && (order < 12)) , "order must be in [6,11]" ); 228 229 assert( __FUNCTION__, (CONFIG_PPM_PAGE_SHIFT == 12) , "check status bit_vector width" ); 220 // check argument 221 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER), 222 "order argument %d too large", order ); 223 224 assert( __FUNCTION__, (order >= CONFIG_CACHE_LINE_ORDER), 225 "order argument %d too small", order ); 230 226 231 227 // initialize lock … … 238 234 list_root_init( &kcm->active_root ); 239 235 240 // initialize order and max_blocks 241 kcm->order = order; 242 kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1; 236 // initialize order 237 kcm->order = order; 243 238 244 239 #if DEBUG_KCM 245 thread_t * this = CURRENT_THREAD; 246 uint32_t cycle = (uint32_t)hal_get_cycles(); 247 if( DEBUG_KCM < cycle ) 248 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n", 249 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks ); 240 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) ) 241 printk("\n[%s] cxy %x / order %d\n", 242 __FUNCTION__, local_cxy, order ); 250 243 #endif 251 244 … … 287 280 void * kcm_alloc( uint32_t order ) 288 281 { 289 kcm_t * kcm _ptr;282 kcm_t * kcm; 290 283 kcm_page_t * kcm_page; 291 void * block_ptr; 292 293 // min block size is 64 bytes 294 if( order < 6 ) order = 6; 295 296 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order ); 284 void * block; 285 286 // check argument 287 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER), 288 "order argument %d too large", order ); 289 290 #if DEBUG_KCM 291 uint32_t cycle = (uint32_t)hal_get_cycles(); 292 #endif 293 294 // smallest block size is a cache line 295 if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; 297 296 298 297 // get local pointer on relevant KCM allocator 299 kcm _ptr = &LOCAL_CLUSTER->kcm[order - 6];298 kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; 300 299 301 300 // build extended pointer on local KCM lock 302 xptr_t lock_xp = XPTR( local_cxy , &kcm _ptr->lock );301 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 303 302 304 303 // get KCM lock … … 306 305 307 306 // get a non-full kcm_page 308 kcm_page = kcm_get_page( kcm_ptr ); 309 310 #if DEBUG_KCM 311 thread_t * this = CURRENT_THREAD; 312 uint32_t cycle = (uint32_t)hal_get_cycles(); 313 if( DEBUG_KCM < cycle ) 314 { 315 printk("\n[%s] thread[%x,%x] enters / order %d / page %x / kcm %x / page_status (%x|%x)\n", 316 __FUNCTION__, this->process->pid, this->trdid, order, kcm_page, kcm_ptr, 317 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) ); 318 kcm_remote_display( local_cxy , 
kcm_ptr ); 319 } 320 #endif 307 kcm_page = kcm_get_page( kcm ); 321 308 322 309 if( kcm_page == NULL ) … … 326 313 } 327 314 328 // get a block from selected active page 329 block_ptr = kcm_get_block( kcm_ptr , kcm_page ); 315 #if DEBUG_KCM 316 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 317 printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 318 " page %x / status [%x,%x] / count %d\n", 319 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 320 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 321 #endif 322 323 // allocate a block from selected active page 324 block = kcm_get_block( kcm , kcm_page ); 325 326 #if DEBUG_KCM 327 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 328 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 329 " page %x / status [%x,%x] / count %d\n", 330 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 331 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 332 #endif 330 333 331 334 // release lock 332 335 remote_busylock_release( lock_xp ); 333 336 334 #if DEBUG_KCM 335 if( DEBUG_KCM < cycle ) 336 printk("\n[%s] thread[%x,%x] exit / order %d / block %x / kcm %x / page_status (%x|%x)\n", 337 __FUNCTION__, this->process->pid, this->trdid, order, block_ptr, kcm_ptr, 338 GET_CXY( kcm_page->status ), GET_PTR( kcm_page->status ) ); 339 #endif 340 341 return block_ptr; 337 return block; 342 338 343 339 } // end kcm_alloc() 344 340 345 ///////////////////////////////// 346 void kcm_free( void * block_ptr ) 347 { 348 kcm_t * kcm_ptr; 341 /////////////////////////////// 342 void kcm_free( void * block, 343 uint32_t order ) 344 { 345 kcm_t * kcm; 349 346 kcm_page_t * kcm_page; 350 347 351 348 // check argument 352 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" ); 349 assert( __FUNCTION__, (block != NULL), 350 "block pointer cannot be NULL" ); 351 352 #if DEBUG_KCM 353 uint32_t cycle = (uint32_t)hal_get_cycles(); 354 #endif 355 356 // smallest block size is a cache line 357 if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; 358 359 // get local pointer on relevant KCM allocator 360 kcm = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; 353 361 354 362 // get local pointer on KCM page 355 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 356 357 // get local pointer on KCM descriptor 358 kcm_ptr = kcm_page->kcm; 359 360 #if DEBUG_KCM 361 thread_t * this = CURRENT_THREAD; 362 uint32_t cycle = (uint32_t)hal_get_cycles(); 363 if( (DEBUG_KCM < cycle) && (local_cxy == 1) ) 364 { 365 printk("\n[%s] thread[%x,%x] enters / order %d / block %x / page %x / kcm %x / status [%x,%x]\n", 366 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, block_ptr, kcm_page, kcm_ptr, 367 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) ); 368 kcm_remote_display( local_cxy , kcm_ptr ); 369 } 370 #endif 363 intptr_t kcm_page_mask = (1 << (order + 6)) - 1; 364 kcm_page = (kcm_page_t *)((intptr_t)block & ~kcm_page_mask); 371 365 372 366 // build extended pointer on local KCM lock 373 xptr_t lock_xp = XPTR( local_cxy , &kcm _ptr->lock );367 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 374 368 375 369 // get lock 376 370 remote_busylock_acquire( lock_xp ); 377 371 378 // release block 379 kcm_put_block( kcm_ptr , kcm_page , block_ptr ); 372 #if 
DEBUG_KCM 373 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 374 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 375 " page %x / status [%x,%x] / count %d\n", 376 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 377 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 378 #endif 379 380 // release the block to the relevant page 381 kcm_put_block( kcm , kcm_page , block ); 382 383 #if DEBUG_KCM 384 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 385 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 386 " page %x / status [%x,%x] / count %d\n", 387 __FUNCTION__, order, local_cxy, kcm, kcm->full_pages_nr, kcm->active_pages_nr, 388 kcm_page, (uint32_t)(kcm_page->status>>32), (uint32_t)(kcm_page->status), kcm_page->count ); 389 #endif 380 390 381 391 // release lock 382 392 remote_busylock_release( lock_xp ); 383 393 384 #if DEBUG_KCM385 if( (DEBUG_KCM < cycle) && (local_cxy == 1) )386 {387 printk("\n[%s] thread[%x,%x] exit / order %d / page %x / status [%x,%x]\n",388 __FUNCTION__, this->process->pid, this->trdid, kcm_ptr->order, kcm_ptr,389 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status) );390 kcm_remote_display( local_cxy , kcm_ptr );391 }392 #endif393 394 394 } // end kcm_free() 395 395 … … 400 400 401 401 ///////////////////////////////////////////////////////////////////////////////////// 402 // This static function can be called by any thread running in any cluster. 402 // This static function is called by the kcm_remote_alloc() function. 403 // It can be called by any thread running in any cluster. 403 404 // It returns a local pointer on a block allocated from an active kcm_page. 404 405 // It makes a panic if no block available in the selected kcm_page. 
… … 415 416 { 416 417 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 417 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );418 418 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 419 419 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 420 uint32_t size = 1 << order; 421 422 assert( __FUNCTION__, (count < max) , "kcm_page should not be full" ); 420 421 // check kcm_page not full 422 assert( __FUNCTION__, (count < 63) , 423 "kcm_page should not be full / cxy %x / order %d / count %d", kcm_cxy, order, count ); 423 424 424 425 uint32_t index = 1; … … 427 428 // allocate first free block in kcm_page, update status, 428 429 // and count , compute index of allocated block in kcm_page 429 while( index <= max)430 while( index <= 63 ) 430 431 { 431 432 if( (status & mask) == 0 ) // block found … … 440 441 } 441 442 442 // change the page list if found block is the last443 if( count == max-1)443 // swich the page to full if last block 444 if( (count + 1) == 63 ) 444 445 { 445 446 list_remote_unlink( kcm_cxy , &kcm_page->list ); … … 451 452 452 453 // compute return pointer 453 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 454 455 #if DEBUG_KCM_REMOTE 456 thread_t * this = CURRENT_THREAD; 457 uint32_t cycle = (uint32_t)hal_get_cycles(); 458 if( DEBUG_KCM_REMOTE < cycle ) 459 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n", 460 __FUNCTION__, this->process->pid, this->trdid, 461 ptr, kcm_page, kcm_cxy, size, count + 1 ); 462 #endif 454 void * ptr = (void *)((intptr_t)kcm_page + (index << order)); 463 455 464 456 return ptr; … … 467 459 468 460 ///////////////////////////////////////////////////////////////////////////////////// 469 // This private static function can be called by any thread running in any cluster. 461 // This static function is called by the kcm_remote_free() function. 462 // It can be called by any thread running in any cluster. 470 463 // It releases a previously allocated block to the relevant kcm_page. 471 464 // It changes the kcm_page status as required. 
… … 481 474 void * block_ptr ) 482 475 { 483 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) );484 476 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 485 477 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 486 478 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 487 uint32_t size = 1 << order;488 479 489 // compute block index from block pointer 490 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size;480 // compute block index from block pointer and kcm_page pointer 481 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) >> order; 491 482 492 483 // compute mask in bit vector … … 497 488 printk("\n[WARNING] in %s : block[%x,%x] not allocated / kcm %x / kcm_page %x\n", 498 489 __FUNCTION__, kcm_cxy, block_ptr, kcm_ptr, kcm_page ); 499 printk(" status %L / mask %L / sts & msk %L\n", status, mask, (status & mask) );500 490 kcm_remote_display( kcm_cxy , kcm_ptr ); 501 491 return; … … 506 496 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count - 1 ); 507 497 508 // change the page listif page was full509 if( count == max)498 // switch the page to active if page was full 499 if( count == 63 ) 510 500 { 511 501 list_remote_unlink( kcm_cxy , &kcm_page->list ); … … 516 506 } 517 507 518 #if (DEBUG_KCM_REMOTE & 1)519 thread_t * this = CURRENT_THREAD;520 uint32_t cycle = (uint32_t)hal_get_cycles();521 if( DEBUG_KCM_REMOTE < cycle )522 printk("\n[%s] thread[%x,%x] block %x / page %x / cluster %x / size %x / count %d\n",523 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 )524 #endif525 526 508 } // end kcm_remote_put_block() 527 509 528 510 ///////////////////////////////////////////////////////////////////////////////////// 529 // This privatestatic function can be called by any thread running in any cluster.511 // This static function can be called by any thread running in any cluster. 530 512 // It gets one non-full KCM page from the remote KCM. 
531 513 // It allocates a page from remote PPM to populate the freelist, and initialises … … 545 527 else // allocate a new page from PPM 546 528 { 547 // get one 4 Kbytes page from remote PPM 548 xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 ); 549 529 // get KCM order 530 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order )); 531 532 // get one kcm_page from PPM 533 xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy, 534 order + 6 - CONFIG_PPM_PAGE_ORDER ); 550 535 if( page_xp == XPTR_NULL ) 551 536 { 552 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 553 __FUNCTION__ , kcm_cxy ); 554 537 538 #if DEBUG_KCM_ERROR 539 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 540 __FUNCTION__ , kcm_cxy ); 541 #endif 555 542 return NULL; 556 543 } … … 585 572 void * block_ptr; 586 573 587 if( order < 6 ) order = 6; 588 589 assert( __FUNCTION__, (order < 12) , "order = %d / must be less than 12" , order ); 590 591 // get local pointer on relevant KCM allocator 574 // check kcm_cxy argument 575 assert( __FUNCTION__, cluster_is_active( kcm_cxy ), 576 "cluster %x not active", kcm_cxy ); 577 578 // check order argument 579 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) , 580 "order argument %d too large", order ); 581 582 // smallest size is a cache line 583 if( order < CONFIG_CACHE_LINE_ORDER ) order = CONFIG_CACHE_LINE_ORDER; 584 585 // get local pointer on relevant KCM allocator (same in all clusters) 592 586 kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6]; 593 587 … … 607 601 } 608 602 603 #if DEBUG_KCM 604 uint32_t cycle = (uint32_t)hal_get_cycles(); 605 uint32_t nb_full = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr )); 606 uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr )); 607 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status )); 608 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count )); 609 #endif 610 611 612 #if DEBUG_KCM 613 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 614 printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 615 " page %x / status [%x,%x] / count %d\n", 616 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, 617 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); 618 #endif 619 609 620 // get a block from selected active page 610 621 block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page ); 611 622 623 #if DEBUG_KCM 624 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 625 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 626 " page %x / status [%x,%x] / count %d\n", 627 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, 628 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); 629 #endif 630 612 631 // release lock 613 632 remote_busylock_release( lock_xp ); 614 633 615 #if DEBUG_KCM_REMOTE616 thread_t * this = CURRENT_THREAD;617 uint32_t cycle = (uint32_t)hal_get_cycles();618 if( DEBUG_KCM_REMOTE < cycle )619 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n",620 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );621 #endif622 623 634 return block_ptr; 624 635 625 636 } // end kcm_remote_alloc() 626 637 627 ///////////////////////////////////// 628 void kcm_remote_free( cxy_t kcm_cxy, 629 void * block_ptr ) 638 //////////////////////////////////////// 639 void kcm_remote_free( cxy_t kcm_cxy, 640 void 
* block_ptr, 641 uint32_t order ) 630 642 { 631 643 kcm_t * kcm_ptr; 632 644 kcm_page_t * kcm_page; 633 645 634 // check argument 635 assert( __FUNCTION__, (block_ptr != NULL) , "block pointer cannot be NULL" ); 636 637 // get local pointer on remote KCM page 638 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 639 640 // get local pointer on remote KCM 641 kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) ); 646 // check kcm_cxy argument 647 assert( __FUNCTION__, cluster_is_active( kcm_cxy ), 648 "cluster %x not active", kcm_cxy ); 649 650 // check block_ptr argument 651 assert( __FUNCTION__, (block_ptr != NULL), 652 "block pointer cannot be NULL" ); 653 654 // check order argument 655 assert( __FUNCTION__, (order < CONFIG_PPM_PAGE_ORDER) , 656 "order argument %d too large", order ); 657 658 // smallest block size is a cache line 659 if (order < CONFIG_CACHE_LINE_ORDER) order = CONFIG_CACHE_LINE_ORDER; 660 661 // get local pointer on relevant KCM allocator (same in all clusters) 662 kcm_ptr = &LOCAL_CLUSTER->kcm[order - CONFIG_CACHE_LINE_ORDER]; 663 664 // get local pointer on KCM page 665 intptr_t kcm_page_mask = (1 << (order + 6)) - 1; 666 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~kcm_page_mask); 667 668 #if DEBUG_KCM 669 uint32_t cycle = (uint32_t)hal_get_cycles(); 670 uint32_t nb_full = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr )); 671 uint32_t nb_active = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr )); 672 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status )); 673 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count )); 674 #endif 642 675 643 676 // build extended pointer on remote KCM lock … … 647 680 remote_busylock_acquire( lock_xp ); 648 681 649 // release block 682 #if DEBUG_KCM 683 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 684 printk("\n[%s] enter / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 685 " page %x / status [%x,%x] / count %d\n", 686 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, 687 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); 688 #endif 689 690 // release the block to the relevant page 650 691 kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr ); 692 693 #if DEBUG_KCM 694 if( (DEBUG_KCM_ORDER == order) && (DEBUG_KCM_CXY == local_cxy) && (DEBUG_KCM < cycle) ) 695 printk("\n[%s] exit / order %d / kcm[%x,%x] / nb_full %d / nb_active %d\n" 696 " page %x / status [%x,%x] / count %d\n", 697 __FUNCTION__, order, kcm_cxy, kcm_ptr, nb_full, nb_active, 698 kcm_page, (uint32_t)(status>>32), (uint32_t)(status), kcm_page->count ); 699 #endif 651 700 652 701 // release lock 653 702 remote_busylock_release( lock_xp ); 654 655 #if DEBUG_KCM_REMOTE656 thread_t * this = CURRENT_THREAD;657 uint32_t cycle = (uint32_t)hal_get_cycles();658 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) );659 if( DEBUG_KCM_REMOTE < cycle )660 printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n",661 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr );662 #endif663 703 664 704 } // end kcm_remote_free … … 673 713 uint32_t count; 674 714 715 // get pointers on TXT0 chdev 716 xptr_t txt0_xp = chdev_dir.txt_tx[0]; 717 cxy_t txt0_cxy = GET_CXY( txt0_xp ); 718 chdev_t * txt0_ptr = GET_PTR( txt0_xp ); 719 720 // get extended pointer on remote TXT0 chdev lock 721 xptr_t txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock ); 722 723 // get TXT0 
lock 724 remote_busylock_acquire( txt0_lock_xp ); 725 675 726 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) ); 676 727 uint32_t full_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) ); 677 728 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); 678 729 679 printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n",730 nolock_printk("*** KCM : cxy %x / order %d / full_pages_nr %d / active_pages_nr %d\n", 680 731 kcm_cxy, order, full_pages_nr, active_pages_nr ); 681 732 … … 688 739 count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 689 740 690 printk("- active page %x / status (%x,%x) / count %d\n",691 kcm_page, GET_CXY( status ), GET_PTR( status ), count );741 nolock_printk("- active page %x / status (%x,%x) / count %d\n", 742 kcm_page, (uint32_t)( status<< 32 ), (uint32_t)( status ), count ); 692 743 } 693 744 } … … 701 752 count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 702 753 703 printk("- full page %x / status (%x,%x) / count %d\n",704 kcm_page, GET_CXY( status ), GET_PTR( status ), count );754 nolock_printk("- full page %x / status (%x,%x) / count %d\n", 755 kcm_page, (uint32_t)( status<< 32 ), (uint32_t)( status ), count ); 705 756 } 706 757 } 758 759 // release TXT0 lock 760 remote_busylock_release( txt0_lock_xp ); 761 707 762 } // end kcm remote_display() -
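A note on the allocation path above: kcm_get_block() and kcm_remote_get_block() scan a 64-bit status bit-vector in which block 0 is reserved for the kcm_page descriptor, so only indexes 1 to 63 can be allocated, and the returned pointer is computed as kcm_page + (index << order). The standalone sketch below restates that bit-vector arithmetic; it is illustrative only (demo_get_block is not a kernel function, and the initial mask value is an assumption, since the corresponding line is elided in the hunk above).

#include <stdint.h>
#include <assert.h>

/* Standalone sketch of the kcm_get_block() bit-vector scan: block 0 of every
 * kcm_page holds the page descriptor, so data blocks use indexes 1..63.
 * demo_get_block() and its parameters are illustrative, not kernel code. */
static void * demo_get_block( void     * kcm_page,   /* base of the kcm_page buffer */
                              uint64_t * status,     /* allocation bit-vector       */
                              uint32_t * count,      /* number of allocated blocks  */
                              uint32_t   order )     /* log2( block size in bytes ) */
{
    assert( *count < 63 );                           /* page must not be full       */

    uint32_t index = 1;                              /* skip block 0 (descriptor)   */
    uint64_t mask  = (uint64_t)1 << 1;               /* assumed initial mask        */

    while( index <= 63 )
    {
        if( (*status & mask) == 0 )                  /* first free block found      */
        {
            *status |= mask;                         /* mark it allocated           */
            *count  += 1;
            return (void *)((intptr_t)kcm_page + (index << order));
        }
        index++;
        mask <<= 1;
    }
    return (void *)0;                                /* unreachable when count < 63 */
}

Releasing a block is the symmetric operation: kcm_put_block() recomputes the index from the block pointer, clears the same mask bit, decrements count, and moves the page from the full list back to the active list when it was full.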
trunk/kernel/mm/kcm.h
r672 r683 32 32 #include <kmem.h> 33 33 34 35 #define KCM_PAGE_FULL 036 #define KCM_PAGE_EMPTY 137 #define KCM_PAGE_ACTIVE 238 39 34 /**************************************************************************************** 40 * This structure defines a generic Kernel Cache Manager, that is a block allocator, 41 * for fixed size objects. It exists in each cluster a specific KCM allocator for 42 * the following block sizes: 64, 128, 256, 512, 1024, 2048 bytes. 43 * These six KCM allocators are initialized by the cluster_init() function. 35 * This structure defines a generic Kernel Cache Manager, a fixed size block allocator. 36 * It returns an aligned block whose size is a power of 2, not smaller than a cache line, 37 * but smaller than a small PPM page. It exists in each cluster a specific KCM allocator 38 * for each possible block size. When the cache line contains 64 bytes and the page 39 * contains 4K bytes, the possible block sizes are 64, 128, 256, 512, 1024, 2048 bytes. 40 * These KCM allocators are initialized by the cluster_init() function. 44 41 * 45 * Each KCM cache is implemented as a set o 4 Kbytes pages. A kcm_page is split in slots, 46 * where each slot can contain one block. in each kcm_page, the first slot (that cannot 47 * be smaller than 64 bytes) contains the kcm page descriptor, defined below 42 * Each KCM cache is implemented as a set of "kcm_pages": a "kcm_page" is an aligned 43 * buffer in physical memory (allocated by the PPM allocator) such as : 44 * buffer_size = block_size * 64 <=> buffer_order = block_order + 6. 45 * 46 * A kcm_page contains always 64 kcm_blocks, but the first block (that cannot be smaller 47 * than 64 bytes) is used to store the kcm_page descriptor defining the page allocation 48 * status, and cannot be allocated to store data. 49 * 50 * A KCM cache is extensible, as new kcm_pages are dynamically allocated from the PPM 51 * allocator when required. For a given KCM cache the set of kcm_pages is split in two 52 * lists: the list of "full" pages (containing 63 allocated blocks), and the list of 53 * "active" pages (containing at least one free block). An "empty" page (containing 54 * only free blocks) is considered active, and is not released to PPM. 48 55 * 49 56 * To allow any thread running in any cluster to directly access the KCM of any cluster, … … 62 69 63 70 uint32_t order; /*! ln( block_size ) */ 64 uint32_t max_blocks; /*! max number of blocks per page */65 71 } 66 72 kcm_t; … … 84 90 list_entry_t list; /*! [active / busy / free] list member */ 85 91 kcm_t * kcm; /*! pointer on kcm allocator */ 86 page_t * page; /*! pointer on the physical page descriptor*/92 page_t * page; /*! pointer on physical page descriptor */ 87 93 } 88 94 kcm_page_t; … … 120 126 **************************************************************************************** 121 127 * @ block_ptr : local pointer on the released block. 128 * @ order : log2( block_size in bytes ). 122 129 ***************************************************************************************/ 123 void kcm_free( void * block_ptr ); 130 void kcm_free( void * block_ptr, 131 uint32_t order ); 124 132 125 133 … … 143 151 * @ kcm_cxy : remote KCM cluster identifier. 144 152 * @ block_ptr : local pointer on the released buffer in remote cluster. 153 * @ order : log2( block_size in bytes ). 
145 154 ***************************************************************************************/ 146 155 void kcm_remote_free( cxy_t kcm_cxy, 147 void * block_ptr ); 156 void * block_ptr, 157 uint32_t order ); 148 158 149 159 /**************************************************************************************** -
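The comment above fixes the geometry of a kcm_page: 64 blocks per page, hence buffer_order = block_order + 6, with block 0 holding the page descriptor. Because the buffer returned by the PPM is aligned on its own size, kcm_free() can recover the kcm_page descriptor from any block pointer by masking, which is why the order argument was added to the interface. A minimal sketch of that masking (the helper name is illustrative):

#include <stdint.h>

/* Recover the kcm_page base from a block pointer, as kcm_free() does:
 * order is log2( block size ), and a kcm_page spans 64 = 2**6 blocks,
 * hence (order + 6) low address bits. Illustrative helper. */
static inline void * demo_block_to_page( void * block , uint32_t order )
{
    intptr_t kcm_page_mask = ((intptr_t)1 << (order + 6)) - 1;

    return (void *)((intptr_t)block & ~kcm_page_mask);
}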
trunk/kernel/mm/khm.c
r672 r683 40 40 { 41 41 // check config parameters 42 assert( __FUNCTION__, ((CONFIG_PPM_PAGE_ SHIFT+ CONFIG_PPM_HEAP_ORDER) < 32 ) ,42 assert( __FUNCTION__, ((CONFIG_PPM_PAGE_ORDER + CONFIG_PPM_HEAP_ORDER) < 32 ) , 43 43 "CONFIG_PPM_HEAP_ORDER too large" ); 44 44 … … 47 47 48 48 // compute kernel heap size 49 intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ SHIFT;49 intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_ORDER; 50 50 51 51 // get kernel heap base from PPM -
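The hunk above only renames CONFIG_PPM_PAGE_SHIFT to CONFIG_PPM_PAGE_ORDER; the heap-size computation is unchanged. As a concrete illustration, with assumed configuration values (not taken from this changeset):

#include <stdint.h>

/* With CONFIG_PPM_PAGE_ORDER = 12 (4 KB pages) and CONFIG_PPM_HEAP_ORDER = 10,
 * both assumed here, the assert holds (12 + 10 < 32) and the kernel heap
 * spans (1 << 10) << 12 bytes = 4 MB. */
static const intptr_t demo_heap_size = ((intptr_t)1 << 10) << 12;  /* 4194304 bytes */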
trunk/kernel/mm/kmem.c
r672 r683 2 2 * kmem.c - kernel memory allocator implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019,2020)4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 29 29 #include <thread.h> 30 30 #include <memcpy.h> 31 #include <khm.h>32 31 #include <ppm.h> 33 32 #include <kcm.h> … … 35 34 #include <kmem.h> 36 35 37 ///////////////////////////////////// 38 void * kmem_alloc( kmem_req_t * req ) 39 { 40 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 41 uint32_t flags; // AF_NONE / AF_ZERO / AF_KERNEL 42 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 43 44 type = req->type; 45 order = req->order; 46 flags = req->flags; 47 48 ////////////////////// 49 if( type == KMEM_PPM ) 50 { 51 // allocate the number of requested pages 52 page_t * page_ptr = (void *)ppm_alloc_pages( order ); 53 54 if( page_ptr == NULL ) 55 { 56 printk("\n[ERROR] in %s : PPM failed / order %d / cluster %x\n", 57 __FUNCTION__ , order , local_cxy ); 58 return NULL; 59 } 60 61 xptr_t page_xp = XPTR( local_cxy , page_ptr ); 62 63 // reset page if requested 64 if( flags & AF_ZERO ) page_zero( page_ptr ); 65 66 // get pointer on buffer from the page descriptor 67 void * ptr = GET_PTR( ppm_page2base( page_xp ) ); 68 69 #if DEBUG_KMEM 36 /////////////////////////////////// 37 void * kmem_alloc( uint32_t order, 38 uint32_t flags ) 39 { 40 41 #if DEBUG_KMEM || DEBUG_KMEM_ERROR 70 42 thread_t * this = CURRENT_THREAD; 71 43 uint32_t cycle = (uint32_t)hal_get_cycles(); 72 if( DEBUG_KMEM < cycle ) 73 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 74 __FUNCTION__, this->process->pid, this->trdid, 75 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle ); 44 #endif 45 46 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 47 { 48 // allocate memory from PPM 49 page_t * page = (void *)ppm_alloc_pages( order - CONFIG_PPM_PAGE_ORDER ); 50 51 if( page == NULL ) 52 { 53 54 #if DEBUG_KMEM_ERROR 55 if (DEBUG_KMEM_ERROR < cycle) 56 printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / order %d / cluster %x / cycle %d\n", 57 __FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle ); 58 #endif 59 return NULL; 60 } 61 62 // reset page if requested 63 if( flags & AF_ZERO ) page_zero( page ); 64 65 // get pointer on buffer from the page descriptor 66 xptr_t page_xp = XPTR( local_cxy , page ); 67 void * ptr = GET_PTR( ppm_page2base( page_xp ) ); 68 69 #if DEBUG_KMEM 70 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) ) 71 printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n", 72 __FUNCTION__, this->process->pid, this->trdid, 73 order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle ); 76 74 #endif 77 75 return ptr; 78 76 } 79 /////////////////////////// 80 else if( type == KMEM_KCM ) 77 else // use KCM 81 78 { 82 79 // allocate memory from KCM … … 85 82 if( ptr == NULL ) 86 83 { 87 printk("\n[ERROR] in %s : KCM failed / order %d / cluster %x\n", 88 __FUNCTION__ , order , local_cxy ); 84 85 #if DEBUG_KMEM_ERROR 86 if (DEBUG_KMEM_ERROR < cycle) 87 printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n", 88 __FUNCTION__ , this->process->pid , this->trdid , order , local_cxy , cycle ); 89 #endif 89 90 return NULL; 90 91 } … … 94 95 95 96 #if DEBUG_KMEM 96 thread_t * this = CURRENT_THREAD; 97 uint32_t cycle = (uint32_t)hal_get_cycles(); 98 if( DEBUG_KMEM < cycle ) 99 printk("\n[%s] thread [%x,%x] from 
KCM / %d bytes / base %x / cxy %x / cycle %d\n", 100 __FUNCTION__, this->process->pid, this->trdid, 101 1<<order, ptr, local_cxy, cycle ); 97 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) ) 98 printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n", 99 __FUNCTION__, this->process->pid, this->trdid, 100 order, ptr, local_cxy, cycle ); 102 101 #endif 103 102 return ptr; 104 103 } 105 ///////////////////////////106 else if( type == KMEM_KHM )107 {108 // allocate memory from KHM109 void * ptr = khm_alloc( &LOCAL_CLUSTER->khm , order );110 111 if( ptr == NULL )112 {113 printk("\n[ERROR] in %s : KHM failed / order %d / cluster %x\n",114 __FUNCTION__ , order , local_cxy );115 return NULL;116 }117 118 // reset memory if requested119 if( flags & AF_ZERO ) memset( ptr , 0 , order );120 121 #if DEBUG_KMEM122 thread_t * this = CURRENT_THREAD;123 uint32_t cycle = (uint32_t)hal_get_cycles();124 if( DEBUG_KMEM < cycle )125 printk("\n[%s] thread[%x,%x] from KHM / %d bytes / base %x / cxy %x / cycle %d\n",126 __FUNCTION__, this->process->pid, this->trdid,127 order, ptr, local_cxy, cycle );128 #endif129 return ptr;130 }131 else132 {133 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);134 return NULL;135 }136 104 } // end kmem_alloc() 137 105 138 ////////////////////////////////// 139 void kmem_free( kmem_req_t * req ) 140 { 141 uint32_t type = req->type; 142 143 ////////////////////// 144 if( type == KMEM_PPM ) 145 { 146 page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , req->ptr ) ) ); 106 ////////////////////////////// 107 void kmem_free( void * ptr, 108 uint32_t order ) 109 { 110 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 111 { 112 page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , ptr ) ) ); 147 113 148 114 ppm_free_pages( page ); 149 115 } 150 /////////////////////////// 151 else if( type == KMEM_KCM ) 116 else // use KCM 152 117 { 153 kcm_free( req->ptr ); 154 } 155 /////////////////////////// 156 else if( type == KMEM_KHM ) 157 { 158 khm_free( req->ptr ); 159 } 160 else 161 { 162 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 163 } 118 kcm_free( ptr , order ); 119 } 164 120 } // end kmem_free() 165 121 166 /////////////////////////////////////////// 167 void * kmem_remote_alloc( cxy_t cxy, 168 kmem_req_t * req ) 169 { 170 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 171 uint32_t flags; // AF_ZERO / AF_KERNEL / AF_NONE 172 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 173 174 type = req->type; 175 order = req->order;176 flags = req->flags;177 178 ////////////////////// 179 if( type == KMEM_PPM )180 { 181 // allocate the number of requested pages from remote cluster182 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );122 123 124 //////////////////////////////////////// 125 void * kmem_remote_alloc( cxy_t cxy, 126 uint32_t order, 127 uint32_t flags ) 128 { 129 130 #if DEBUG_KMEM || DEBUG_KMEM_ERROR 131 thread_t * this = CURRENT_THREAD; 132 uint32_t cycle = (uint32_t)hal_get_cycles(); 133 #endif 134 135 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 136 { 137 // allocate memory from PPM 138 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order - CONFIG_PPM_PAGE_ORDER ); 183 139 184 140 if( page_xp == XPTR_NULL ) 185 141 { 186 printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n", 187 __FUNCTION__ , order , cxy ); 142 143 #if DEBUG_KMEM_ERROR 144 if( DEBUG_KMEM_ERROR < cycle ) 145 printk("\n[ERROR] in %s : thread[%x,%x] failed for PPM / 
order %d / cluster %x / cycle %d\n", 146 __FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle ); 147 #endif 188 148 return NULL; 189 149 } … … 192 152 xptr_t base_xp = ppm_page2base( page_xp ); 193 153 194 // reset page if requested 195 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); 196 197 198 #if DEBUG_KMEM_REMOTE 199 thread_t * this = CURRENT_THREAD; 200 uint32_t cycle = (uint32_t)hal_get_cycles(); 201 if( DEBUG_KMEM_REMOTE < cycle ) 202 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 203 __FUNCTION__, this->process->pid, this->trdid, 204 1<<order, ppm_page2ppn( page_xp ), cxy, cycle ); 154 // reset memory if requested 155 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , 1<<order ); 156 157 #if DEBUG_KMEM 158 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) ) 159 printk("\n[%s] thread[%x,%x] from PPM / order %d / ppn %x / cxy %x / cycle %d\n", 160 __FUNCTION__, this->process->pid, this->trdid, 161 order, ppm_page2ppn( page_xp ), cxy, cycle ); 205 162 #endif 206 163 return GET_PTR( base_xp ); 207 164 } 208 /////////////////////////// 209 else if( type == KMEM_KCM ) 165 else // use KCM 210 166 { 211 167 // allocate memory from KCM … … 214 170 if( ptr == NULL ) 215 171 { 216 printk("\n[ERROR] in %s : failed for KCM / order %d in cluster %x\n", 217 __FUNCTION__ , order , cxy ); 172 173 #if DEBUG_KMEM_ERROR 174 if( DEBUG_KMEM_ERROR < cycle ) 175 printk("\n[ERROR] in %s : thread[%x,%x] failed for KCM / order %d / cluster %x / cycle %d\n", 176 __FUNCTION__ , this->process->pid , this->trdid , order , cxy , cycle ); 177 #endif 218 178 return NULL; 219 179 } … … 222 182 if( flags & AF_ZERO ) hal_remote_memset( XPTR( cxy , ptr ) , 0 , 1<<order ); 223 183 224 #if DEBUG_KMEM_REMOTE 225 thread_t * this = CURRENT_THREAD; 226 uint32_t cycle = (uint32_t)hal_get_cycles(); 227 if( DEBUG_KMEM_REMOTE < cycle ) 228 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n", 229 __FUNCTION__, this->process->pid, this->trdid, 230 1<<order, ptr, cxy, cycle ); 184 #if DEBUG_KMEM 185 if( (DEBUG_KMEM < cycle) && (DEBUG_KMEM_CXY == local_cxy) && (DEBUG_KMEM_ORDER == order) ) 186 printk("\n[%s] thread [%x,%x] from KCM / order %d / base %x / cxy %x / cycle %d\n", 187 __FUNCTION__, this->process->pid, this->trdid, 188 order, ptr, cxy, cycle ); 231 189 #endif 232 190 return ptr; 233 191 } 234 ///////////////////////////235 else if( type == KMEM_KHM )236 {237 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ );238 return NULL;239 }240 else241 {242 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__);243 return NULL;244 }245 192 } // kmem_remote_malloc() 246 193 247 //////////////////////////////////////// 248 void kmem_remote_free( cxy_t cxy, 249 kmem_req_t * req ) 250 { 251 uint32_t type = req->type; 252 253 ////////////////////// 254 if( type == KMEM_PPM ) 255 { 256 page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , req->ptr ) ) ); 194 ///////////////////////////////////// 195 void kmem_remote_free( cxy_t cxy, 196 void * ptr, 197 uint32_t order ) 198 { 199 if( order >= CONFIG_PPM_PAGE_ORDER ) // use PPM 200 { 201 page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , ptr ) ) ); 257 202 258 203 ppm_remote_free_pages( cxy , page ); 259 204 } 260 /////////////////////////// 261 else if( type == KMEM_KCM ) 205 else // use KCM 262 206 { 263 kcm_remote_free( cxy , req->ptr ); 264 } 265 /////////////////////////// 266 else if( type 
== KMEM_KHM ) 267 { 268 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ ); 269 } 270 else 271 { 272 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 273 } 207 kcm_remote_free( cxy , ptr , order ); 208 } 274 209 } // end kmem_remote_free() 275 210 -
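The new kmem_alloc() / kmem_free() dispatch is purely order-based: a request of 2**order bytes goes to the PPM buddy allocator (with the order reduced by CONFIG_PPM_PAGE_ORDER) when it is at least one page, and to the per-size KCM cache otherwise. The helper below only restates that rule; demo_kmem_route() and the assumed page order of 12 are illustrative, not kernel code.

#include <stdint.h>

/* Order-based dispatch rule of kmem_alloc(), assuming CONFIG_PPM_PAGE_ORDER = 12:
 *   order 13 (8 KB)   -> PPM, buddy order 13 - 12 = 1 (two contiguous pages)
 *   order  8 (256 B)  -> KCM cache for 256-byte blocks
 * The helper only computes the dispatch, it does not allocate anything. */
typedef enum { USE_PPM , USE_KCM } kmem_route_t;

static kmem_route_t demo_kmem_route( uint32_t order , uint32_t * sub_order )
{
    const uint32_t page_order = 12;            /* assumed CONFIG_PPM_PAGE_ORDER     */

    if( order >= page_order )
    {
        *sub_order = order - page_order;       /* order passed to ppm_alloc_pages() */
        return USE_PPM;
    }
    else
    {
        *sub_order = order;                    /* order passed to kcm_alloc()       */
        return USE_KCM;
    }
}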
trunk/kernel/mm/kmem.h
r656 r683 1 1 /* 2 * kmem.h - kernel unified memory allocator interface2 * kmem.h - unified kernel memory allocator definition 3 3 * 4 * Authors Alain Greiner (2016,2017,2018,2019)4 * Authors Alain Greiner (2016,2017,2018,2019,2020) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 29 29 30 30 /************************************************************************************* 31 * This enum defines the three Kernel Memory Allocaror types32 ************************************************************************************/33 34 enum35 {36 KMEM_PPM = 0, /*! PPM allocator */37 KMEM_KCM = 1, /*! KCM allocator */38 KMEM_KHM = 2, /*! KHM allocator */39 };40 41 /*************************************************************************************42 31 * This defines the generic Allocation Flags that can be associated to 43 32 * a Kernel Memory Request. … … 45 34 46 35 #define AF_NONE 0x0000 // no attributes 47 #define AF_KERNEL 0x0001 // for kernel use 48 #define AF_ZERO 0x0002 // must be reset to 0 49 50 /************************************************************************************* 51 * This structure defines a Kernel Memory Request. 52 ************************************************************************************/ 53 54 typedef struct kmem_req_s 55 { 56 uint32_t type; /*! KMEM_PPM / KMEM_KCM / KMEM_KHM */ 57 uint32_t order; /*! PPM: ln2(pages) / KCM: ln2(bytes) / KHM: bytes */ 58 uint32_t flags; /*! request attributes */ 59 void * ptr; /*! local pointer on allocated buffer (only used by free) */ 60 } 61 kmem_req_t; 36 #define AF_KERNEL 0x0001 // for kernel use ??? 37 #define AF_ZERO 0x0002 // data buffer must be reset to 0 62 38 63 39 /************************************************************************************* 64 40 * These two functions allocate physical memory in a local or remote cluster 65 * as specified by the kmem_req_t request descriptor, and return a local pointer 66 * on the allocated buffer. It uses three specialised physical memory allocators: 67 * - PPM (Physical Pages Manager) allocates N contiguous small physical pages. 68 * N is a power of 2, and req.order = ln(N). Implement the buddy algorithm. 69 * - KCM (Kernel Cache Manager) allocates aligned blocks of M bytes from a cache. 70 * M is a power of 2, and req.order = ln( M ). One cache per block size. 71 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes, 72 * M can have any value, and req.order = M. 73 * 74 * WARNING: the physical memory allocated with a given allocator type must be 75 * released using the same allocator type. 41 * as specified by the <cxy>, <order> and <flags> arguments, and return a local 42 * pointer on the allocated buffer. The buffer size (in bytes) is a power of 2, 43 * equal to (1 << order) bytes. It can be initialized to zero if requested. 44 * Depending on the <order> value, it uses two specialised allocators: 45 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages 46 * Manager) allocates 2**(order - PPM_PAGE_ORDER) contiguous small physical pages. 47 * This allocator implements the buddy algorithm. 48 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager) 49 * allocates an aligned block of 2**order bytes from specialised KCM[ORDER] caches 50 * (one KCM cache per block size). 76 51 ************************************************************************************* 77 * @ cxy : target cluster identifier for a remote access. 78 * @ req : local pointer on allocation request. 
52 * @ cxy : [in] target cluster identifier for a remote access). 53 * @ order : [in] ln( block size in bytes). 54 * @ flags : [in] allocation flags defined above. 79 55 * @ return local pointer on allocated buffer if success / return NULL if no memory. 80 56 ************************************************************************************/ 81 void * kmem_alloc( kmem_req_t * req ); 57 void * kmem_alloc( uint32_t order, 58 uint32_t flags ); 82 59 83 void * kmem_remote_alloc( cxy_t cxy, 84 kmem_req_t * req ); 60 void * kmem_remote_alloc( cxy_t cxy, 61 uint32_t order, 62 uint32_t flags ); 85 63 86 64 /************************************************************************************* 87 * These two functions release previously allocated physical memory, as specified 88 * by the <type> and <ptr> fields of the kmem_req_t request descriptor. 65 * These two functions release a previously allocated physical memory block, 66 * as specified by the <cxy>, <order> and <ptr> arguments. 67 * - When order is larger or equal to CONFIG_PPM_PAGE_ORDER, the PPM (Physical Pages 68 * Manager) releases 2**(order - PPM_PAGE_ORDER) contiguous small physical pages. 69 * This allocator implements the buddy algorithm. 70 * - When order is smaller than CONFIG_PPM_PAGE_ORDER, the KCM (Kernel Cache Manager) 71 * release release the block of 2**order bytes to the specialised KCM[order] cache. 89 72 ************************************************************************************* 90 * @ cxy : target cluster identifier for a remote access. 91 * @ req : local pointer to request descriptor. 73 * @ cxy : [in] target cluster identifier for a remote access. 74 * @ ptr : [in] local pointer to released block. 75 * @ order : [in] ln( block size in bytes ). 92 76 ************************************************************************************/ 93 void kmem_free ( kmem_req_t * req ); 77 void kmem_free( void * ptr, 78 uint32_t order ); 94 79 95 void kmem_remote_free( cxy_t cxy, 96 kmem_req_t * req ); 80 void kmem_remote_free( cxy_t cxy, 81 void * ptr, 82 uint32_t order ); 97 83 98 84 -
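Since kmem_free() and kmem_remote_free() now take the order explicitly, a caller must pass back the same order it used for the allocation. A usage sketch under that contract (demo_kmem_usage() is a hypothetical caller; with 4 KB pages an order-9 request is served by the KCM):

#include <kmem.h>     /* kernel header declaring kmem_alloc() / kmem_free() */

/* Hypothetical caller: allocate a zeroed 512-byte block (order 9) in the
 * local cluster, use it, then release it with the same order. */
static void demo_kmem_usage( void )
{
    void * buf = kmem_alloc( 9 , AF_ZERO );

    if( buf != NULL )
    {
        /* ... use the 512-byte buffer ... */

        kmem_free( buf , 9 );
    }
}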
trunk/kernel/mm/mapper.c
r672 r683 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2015) 5 * Alain Greiner (2016,2017,2018,2019,2020)5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 51 51 { 52 52 mapper_t * mapper_ptr; 53 kmem_req_t req;54 53 error_t error; 55 54 56 55 // allocate memory for mapper descriptor 57 req.type = KMEM_KCM; 58 req.order = bits_log2( sizeof(mapper_t) ); 59 req.flags = AF_KERNEL | AF_ZERO; 60 mapper_ptr = kmem_remote_alloc( cxy , &req ); 56 mapper_ptr = kmem_remote_alloc( cxy , bits_log2(sizeof(mapper_t)) , AF_ZERO ); 61 57 62 58 if( mapper_ptr == NULL ) 63 59 { 64 printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ ); 60 61 #if DEBUG_MAPPER_ERROR 62 printk("\n[ERROR] in %s : no memory for mapper descriptor\n", __FUNCTION__ ); 63 #endif 65 64 return XPTR_NULL; 66 65 } … … 77 76 if( error ) 78 77 { 79 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ ); 80 req.type = KMEM_KCM; 81 req.ptr = mapper_ptr; 82 kmem_remote_free( cxy , &req ); 78 79 #if DEBUG_MAPPER_ERROR 80 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ ); 81 kmem_remote_free( cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) ); 82 #endif 83 83 return XPTR_NULL; 84 84 } … … 104 104 uint32_t found_index = 0; 105 105 uint32_t start_index = 0; 106 kmem_req_t req;107 106 108 107 cxy_t mapper_cxy = GET_CXY( mapper_xp ); … … 137 136 138 137 // release memory for mapper descriptor 139 req.type = KMEM_KCM; 140 req.ptr = mapper_ptr; 141 kmem_remote_free( mapper_cxy , &req ); 138 kmem_remote_free( mapper_cxy , mapper_ptr , bits_log2(sizeof(mapper_t)) ); 142 139 143 140 } // end mapper_destroy() … … 153 150 uint32_t inode_type = 0; 154 151 155 thread_t * this = CURRENT_THREAD; 152 #if DEBUG_MAPPER_HANDLE_MISS || DEBUG_MAPPER_ERROR 153 thread_t * this = CURRENT_THREAD; 154 uint32_t cycle = (uint32_t)hal_get_cycles(); 155 #endif 156 156 157 157 // get target mapper cluster and local pointer … … 170 170 171 171 #if DEBUG_MAPPER_HANDLE_MISS 172 uint32_t cycle = (uint32_t)hal_get_cycles();173 172 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 174 173 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) … … 185 184 #endif 186 185 187 #if( DEBUG_MAPPER_HANDLE_MISS & 2)186 #if( DEBUG_MAPPER_HANDLE_MISS & 1 ) 188 187 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 189 188 { … … 193 192 #endif 194 193 195 // allocate one 4 Kbytes page from the remote mapper cluster 196 xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 ); 194 // allocate one 4 Kbytes page in the remote mapper cluster 195 void * base_ptr = kmem_remote_alloc( mapper_cxy , 12 , AF_NONE ); 196 197 if( base_ptr == NULL ) 198 { 199 200 #if DEBUG_MAPPER_ERROR 201 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x / cycle %d\n", 202 __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy , cycle ); 203 #endif 204 return -1; 205 } 206 207 // get pointers on allocated page descrptor 208 xptr_t page_xp = ppm_base2page( XPTR( mapper_cxy , base_ptr ) ); 197 209 page_t * page_ptr = GET_PTR( page_xp ); 198 199 if( page_xp == XPTR_NULL )200 {201 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",202 __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy );203 return -1;204 }205 210 206 211 // initialize the page descriptor … … 217 222 page_id, 218 223 page_ptr ); 219 220 224 if( error ) 221 225 { 222 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n", 223 __FUNCTION__ , this->process->pid, this->trdid ); 224 
ppm_remote_free_pages( mapper_cxy , page_ptr ); 226 227 #if DEBUG_MAPPER_ERROR 228 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper / cycle %d\n", 229 __FUNCTION__ , this->process->pid, this->trdid , cycle ); 230 ppm_remote_free_pages( mapper_cxy , page_ptr ); 231 #endif 225 232 return -1; 226 233 } … … 236 243 if( error ) 237 244 { 238 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 239 __FUNCTION__ , this->process->pid, this->trdid ); 240 mapper_remote_release_page( mapper_xp , page_ptr ); 245 246 #if DEBUG_MAPPER_ERROR 247 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device / cycle %d\n", 248 __FUNCTION__ , this->process->pid, this->trdid , cycle ); 249 mapper_remote_release_page( mapper_xp , page_ptr ); 250 #endif 241 251 return -1; 242 252 } … … 260 270 #endif 261 271 262 #if( DEBUG_MAPPER_HANDLE_MISS & 2)272 #if( DEBUG_MAPPER_HANDLE_MISS & 1 ) 263 273 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 264 274 { … … 299 309 #endif 300 310 301 #if( DEBUG_MAPPER_GET_PAGE & 2)311 #if( DEBUG_MAPPER_GET_PAGE & 1 ) 302 312 if( DEBUG_MAPPER_GET_PAGE < cycle ) 303 313 ppm_remote_display( local_cxy ); … … 336 346 if( error ) 337 347 { 338 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", 339 __FUNCTION__ , this->process->pid, this->trdid ); 340 remote_rwlock_wr_release( lock_xp ); 348 349 #if DEBUG_MAPPER_ERROR 350 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", 351 __FUNCTION__ , this->process->pid, this->trdid ); 352 remote_rwlock_wr_release( lock_xp ); 353 #endif 341 354 return XPTR_NULL; 342 355 } … … 364 377 #endif 365 378 366 #if( DEBUG_MAPPER_GET_PAGE & 2)379 #if( DEBUG_MAPPER_GET_PAGE & 1) 367 380 if( DEBUG_MAPPER_GET_PAGE < cycle ) 368 381 ppm_remote_display( local_cxy ); … … 432 445 if( error ) 433 446 { 434 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", 435 __FUNCTION__ , this->process->pid, this->trdid ); 436 remote_rwlock_wr_release( lock_xp ); 447 448 #if DEBUG_MAPPER_ERROR 449 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", 450 __FUNCTION__ , this->process->pid, this->trdid ); 451 remote_rwlock_wr_release( lock_xp ); 452 #endif 437 453 return XPTR_NULL; 438 454 } … … 460 476 #endif 461 477 462 #if( DEBUG_MAPPER_GET_FAT_PAGE & 2)478 #if( DEBUG_MAPPER_GET_FAT_PAGE & 1) 463 479 if( DEBUG_MAPPER_GET_FAT_PAGE < cycle ) 464 480 ppm_remote_display( local_cxy ); … … 532 548 533 549 // compute indexes of pages for first and last byte in mapper 534 uint32_t first = min_byte >> CONFIG_PPM_PAGE_ SHIFT;535 uint32_t last = max_byte >> CONFIG_PPM_PAGE_ SHIFT;550 uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER; 551 uint32_t last = max_byte >> CONFIG_PPM_PAGE_ORDER; 536 552 537 553 #if (DEBUG_MAPPER_MOVE_USER & 1) … … 668 684 669 685 // compute indexes for first and last pages in mapper 670 uint32_t first = min_byte >> CONFIG_PPM_PAGE_ SHIFT;671 uint32_t last = max_byte >> CONFIG_PPM_PAGE_ SHIFT;686 uint32_t first = min_byte >> CONFIG_PPM_PAGE_ORDER; 687 uint32_t last = max_byte >> CONFIG_PPM_PAGE_ORDER; 672 688 673 689 // compute source and destination clusters … … 853 869 if( error ) 854 870 { 855 printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n", 856 __FUNCTION__, page_ptr->index ); 871 872 #if DEBUG_MAPPER_SYNC 873 printk("\n[ERROR] in %s : cannot synchonize dirty page %d\n", 874 __FUNCTION__, page_ptr->index ); 875 #endif 857 876 return -1; 858 877 } -
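Several hunks above replace CONFIG_PPM_PAGE_SHIFT by CONFIG_PPM_PAGE_ORDER in the byte-offset to page-index conversions used by the mapper move functions. A small illustration of that arithmetic, assuming 4 KB pages:

#include <stdint.h>

/* Page-index arithmetic of the mapper move functions, assuming
 * CONFIG_PPM_PAGE_ORDER = 12 (4 KB pages): a transfer touching bytes
 * 5000 to 9000 of a file spans mapper pages 1 and 2. */
static void demo_mapper_pages( void )
{
    const uint32_t page_order = 12;            /* assumed CONFIG_PPM_PAGE_ORDER */
    uint32_t min_byte = 5000;
    uint32_t max_byte = 9000;

    uint32_t first = min_byte >> page_order;   /* = 1 */
    uint32_t last  = max_byte >> page_order;   /* = 2 */

    (void)first;
    (void)last;
}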
trunk/kernel/mm/mapper.h
r657 r683 39 39 /******************************************************************************************* 40 40 * This mapper_t object implements the kernel cache for a given VFS file or directory. 41 * There is one mapper per file/dir. It is implemented as a three levels radix tree, 42 * entirely stored in the same cluster as the inode representing the file/dir. 41 * There is one mapper per file/dir. 42 * - It is implemented as a three levels radix tree, entirely stored in the same cluster 43 * as the inode representing the file/directory. 43 44 * - The fast retrieval key is the page index in the file. 44 45 * The ix1_width, ix2_width, ix3_width sub-indexes are configuration parameters. 45 46 * - The leaves are pointers on physical page descriptors, dynamically allocated 46 * in the local cluster.47 * in the same cluster as the radix tree. 47 48 * - The mapper is protected by a "remote_rwlock", to support several simultaneous 48 49 * "readers", and only one "writer". … … 60 61 * buffer, that can be physically located in any cluster. 61 62 * - In the present implementation the cache size for a given file increases on demand, 62 * and the allocated memory is only released when the mapper/inode is destroyed. 63 * and the allocated memory is only released when the inode is destroyed. 64 * 65 * WARNING : This mapper implementation makes the assumption that the PPM page size 66 * is 4 Kbytes. This code should be modified to support a generic page size, 67 * defined by the CONFIG_PPM_PAGE_SIZE parameter. 63 68 ******************************************************************************************/ 64 69 -
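The comment above describes the mapper as a three-level radix tree keyed by the page index in the file, with ix1_width / ix2_width / ix3_width as configuration parameters. The sketch below shows one way such a key can be split into the three sub-indexes; the widths used are assumptions, not the actual configuration values:

#include <stdint.h>

/* Decomposition of a page index into three radix-tree sub-indexes.
 * The widths (6, 6, 6) stand in for the ix1_width / ix2_width / ix3_width
 * configuration parameters and are assumptions. */
static void demo_radix_split( uint32_t   index,
                              uint32_t * ix1,
                              uint32_t * ix2,
                              uint32_t * ix3 )
{
    const uint32_t w1 = 6, w2 = 6, w3 = 6;

    *ix3 =  index               & ((1u << w3) - 1);   /* lowest bits  : leaf level   */
    *ix2 = (index >> w3)        & ((1u << w2) - 1);   /* middle bits  : second level */
    *ix1 = (index >> (w3 + w2)) & ((1u << w1) - 1);   /* highest bits : first level  */
}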
trunk/kernel/mm/page.h
r656 r683 3 3 * 4 4 * Authors Ghassan Almalles (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites -
trunk/kernel/mm/ppm.c
r672 r683 60 60 61 61 void * base_ptr = ppm->vaddr_base + 62 ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ SHIFT);62 ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER); 63 63 64 64 return XPTR( page_cxy , base_ptr ); … … 75 75 76 76 page_t * page_ptr = ppm->pages_tbl + 77 ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ SHIFT);77 ((base_ptr - ppm->vaddr_base)>>CONFIG_PPM_PAGE_ORDER); 78 78 79 79 return XPTR( base_cxy , page_ptr ); … … 91 91 page_t * page_ptr = GET_PTR( page_xp ); 92 92 93 paddr_t paddr = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ SHIFT);94 95 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ SHIFT);93 paddr_t paddr = PADDR( page_cxy , (page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_ORDER ); 94 95 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER); 96 96 97 97 } // end hal_page2ppn() … … 102 102 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 103 103 104 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ SHIFT;104 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER; 105 105 106 106 cxy_t cxy = CXY_FROM_PADDR( paddr ); 107 107 lpa_t lpa = LPA_FROM_PADDR( paddr ); 108 108 109 return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ SHIFT] );109 return XPTR( cxy , &ppm->pages_tbl[lpa>>CONFIG_PPM_PAGE_ORDER] ); 110 110 111 111 } // end hal_ppn2page … … 118 118 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 119 119 120 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ SHIFT;120 paddr_t paddr = ((paddr_t)ppn) << CONFIG_PPM_PAGE_ORDER; 121 121 122 122 cxy_t cxy = CXY_FROM_PADDR( paddr ); … … 137 137 paddr_t paddr = PADDR( base_cxy , (base_ptr - ppm->vaddr_base) ); 138 138 139 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ SHIFT);139 return (ppn_t)(paddr >> CONFIG_PPM_PAGE_ORDER); 140 140 141 141 } // end ppm_base2ppn() … … 159 159 160 160 assert( __FUNCTION__, !page_is_flag( page , PG_FREE ) , 161 "page already released : ppn = %x \n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );161 "page already released : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) ); 162 162 163 163 assert( __FUNCTION__, !page_is_flag( page , PG_RESERVED ) , 164 "reserved page : ppn = %x \n" , ppm_page2ppn( XPTR( local_cxy , page ) ) );164 "reserved page : ppn = %x" , ppm_page2ppn( XPTR( local_cxy , page ) ) ); 165 165 166 166 // set FREE flag in released page descriptor … … 214 214 page_t * found_block; 215 215 216 thread_t * this = CURRENT_THREAD;217 218 216 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 219 217 220 #if DEBUG_PPM_ALLOC_PAGES 221 uint32_t cycle = (uint32_t)hal_get_cycles(); 218 #if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR 219 thread_t * this = CURRENT_THREAD; 220 uint32_t cycle = (uint32_t)hal_get_cycles(); 222 221 #endif 223 222 … … 232 231 233 232 // check order 234 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 233 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , 234 "illegal order argument = %d" , order ); 235 235 236 236 //build extended pointer on lock protecting remote PPM … … 273 273 if( current_block == NULL ) // return failure if no free block found 274 274 { 275 // release lock protecting free lists 275 276 #if DEBUG_PPM_ERROR 277 printk("\n[ERROR] in %s thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 278 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle ); 279 #endif 280 // release lock protecting free lists 276 281 remote_busylock_release( lock_xp ); 277 278 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",279 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy 
);280 281 282 return NULL; 282 283 } … … 385 386 page_t * found_block; 386 387 387 thread_t * this = CURRENT_THREAD;388 389 388 // check order 390 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 389 assert( __FUNCTION__, (order < CONFIG_PPM_MAX_ORDER) , 390 "illegal order argument = %d" , order ); 391 391 392 392 // get local pointer on PPM (same in all clusters) 393 393 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 394 394 395 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 395 #if DEBUG_PPM_ALLOC_PAGES || DEBUG_PPM_ERROR 396 thread_t * this = CURRENT_THREAD; 396 397 uint32_t cycle = (uint32_t)hal_get_cycles(); 397 398 #endif 398 399 399 #if DEBUG_PPM_ REMOTE_ALLOC_PAGES400 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES < cycle )400 #if DEBUG_PPM_ALLOC_PAGES 401 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 401 402 { 402 403 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n", 403 404 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 404 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );405 if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy ); 405 406 } 406 407 #endif … … 445 446 if( current_block == NULL ) // return failure 446 447 { 448 449 #if DEBUG_PPM_ERROR 450 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 451 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 452 #endif 447 453 // release lock protecting free lists 448 454 remote_busylock_release( lock_xp ); 449 450 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n",451 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy );452 453 455 return XPTR_NULL; 454 456 } … … 489 491 hal_fence(); 490 492 491 #if DEBUG_PPM_ REMOTE_ALLOC_PAGES492 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES < cycle )493 #if DEBUG_PPM_ALLOC_PAGES 494 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 493 495 { 494 496 printk("\n[%s] thread[%x,%x] allocated %d page(s) in cluster %x / ppn %x / cycle %d\n", 495 497 __FUNCTION__, this->process->pid, this->trdid, 496 498 1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle ); 497 if( DEBUG_PPM_ REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );499 if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( cxy ); 498 500 } 499 501 #endif … … 521 523 uint32_t order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) ); 522 524 523 #if DEBUG_PPM_ REMOTE_FREE_PAGES525 #if DEBUG_PPM_FREE_PAGES 524 526 thread_t * this = CURRENT_THREAD; 525 527 uint32_t cycle = (uint32_t)hal_get_cycles(); … … 527 529 #endif 528 530 529 #if DEBUG_PPM_ REMOTE_FREE_PAGES530 if( DEBUG_PPM_ REMOTE_FREE_PAGES < cycle )531 #if DEBUG_PPM_FREE_PAGES 532 if( DEBUG_PPM_FREE_PAGES < cycle ) 531 533 { 532 534 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n", 533 535 __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle ); 534 if( DEBUG_PPM_ REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );536 if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy ); 535 537 } 536 538 #endif … … 549 551 550 552 assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_FREE ) , 551 "page already released : ppn = %x \n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );553 "page already released : ppn = %x" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) ); 552 554 553 555 assert( __FUNCTION__, !page_remote_is_flag( page_xp , PG_RESERVED ) , 554 "reserved page : ppn = %x \n" , ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) );556 "reserved page : ppn = %x" 
, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ) ); 555 557 556 558 // set the FREE flag in released page descriptor … … 607 609 hal_fence(); 608 610 609 #if DEBUG_PPM_ REMOTE_FREE_PAGES610 if( DEBUG_PPM_ REMOTE_FREE_PAGES < cycle )611 #if DEBUG_PPM_FREE_PAGES 612 if( DEBUG_PPM_FREE_PAGES < cycle ) 611 613 { 612 614 printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n", 613 615 __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle ); 614 if( DEBUG_PPM_ REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );616 if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( page_cxy ); 615 617 } 616 618 #endif -
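The ppm.c hunks above consistently rename CONFIG_PPM_PAGE_SHIFT to CONFIG_PPM_PAGE_ORDER in the page/base/ppn conversions, merge the remote and local debug switches into DEBUG_PPM_ALLOC_PAGES / DEBUG_PPM_FREE_PAGES, and move the allocation-failure message under DEBUG_PPM_ERROR. The following stand-alone model (not kernel code) illustrates the descriptor/base arithmetic those conversions rely on; the structure names mirror the changeset but are simplified, and PAGE_ORDER stands in for CONFIG_PPM_PAGE_ORDER.

#include <stdint.h>
#include <stdio.h>

#define PAGE_ORDER 12                        /* stands in for CONFIG_PPM_PAGE_ORDER (4 KB pages) */

typedef struct page_s { uint32_t order; uint32_t flags; } page_t;

typedef struct ppm_s
{
    page_t  * pages_tbl;                     /* array of page descriptors                */
    uint8_t * vaddr_base;                    /* base address of the described memory     */
} ppm_t;

/* page descriptor -> base address of the corresponding page */
static void * model_page2base( ppm_t * ppm , page_t * page )
{
    return ppm->vaddr_base + ((page - ppm->pages_tbl) << PAGE_ORDER);
}

/* base address -> page descriptor */
static page_t * model_base2page( ppm_t * ppm , void * base )
{
    return ppm->pages_tbl + (((uint8_t *)base - ppm->vaddr_base) >> PAGE_ORDER);
}

int main( void )
{
    static page_t  tbl[16];
    static uint8_t mem[16 << PAGE_ORDER];    /* 16 pages of 4 KB */
    ppm_t ppm = { tbl , mem };

    void   * base = model_page2base( &ppm , &tbl[3] );
    page_t * back = model_base2page( &ppm , base );

    printf("page 3 -> offset 0x%lx -> page %ld\n",
           (unsigned long)((uint8_t *)base - mem), (long)(back - tbl));
    return 0;
}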
trunk/kernel/mm/ppm.h
r656 r683 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019 )5 * Alain Greiner (2016,2017,2018,2019,2020) 6 6 * 7 7 * Copyright (c) UPMC Sorbonne Universites … … 57 57 * the "buddy" algorithm. 58 58 * The local threads can access these free_lists by calling the ppm_alloc_pages() and 59 * ppm_free_page() functions, butthe remote threads can access the same free lists,59 * ppm_free_page() functions, and the remote threads can access the same free lists, 60 60 * by calling the ppm_remote_alloc_pages() and ppm_remote_free_pages functions. 61 61 * Therefore, these free lists are protected by a remote_busy_lock. … … 98 98 * physical pages. It takes the lock protecting the free_lists before register the 99 99 * released page in the relevant free_list. 100 * In normal use, you do not need to call itdirectly, as the recommended way to free100 * In normal use, it should not be called directly, as the recommended way to free 101 101 * physical pages is to call the generic allocator defined in kmem.h. 102 102 ***************************************************************************************** -
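The ppm.h comment above describes per-cluster free_lists managed with the buddy algorithm and protected by a remote_busylock. As a reminder of what the buddy policy does on allocation, here is a deliberately reduced stand-alone model: simple per-order counters replace the kernel's linked lists and lock, and none of the identifiers below belong to the kernel sources.

#include <stdio.h>

#define MAX_ORDER 6

static int free_count[MAX_ORDER];            /* free blocks per order */

static int buddy_alloc( int order )
{
    int cur;

    /* scan free lists from the requested order upward */
    for( cur = order ; cur < MAX_ORDER ; cur++ )
        if( free_count[cur] > 0 ) break;

    if( cur == MAX_ORDER ) return -1;         /* no free block large enough */

    free_count[cur]--;                        /* take the found block */

    /* split it down, releasing one buddy at each intermediate order */
    while( cur > order )
    {
        cur--;
        free_count[cur]++;
    }
    return 0;                                 /* caller owns one block of <order> */
}

int main( void )
{
    int i;

    free_count[MAX_ORDER-1] = 1;              /* start with one big free block */

    printf("alloc(order 0) : %s\n", buddy_alloc(0) ? "failed" : "ok");

    printf("remaining free blocks per order :");
    for( i = 0 ; i < MAX_ORDER ; i++ ) printf(" %d", free_count[i]);
    printf("\n");
    return 0;
}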
trunk/kernel/mm/vmm.c
r672 r683 1 1 /* 2 * vmm.c - virtual memory manager related operations definition.2 * vmm.c - virtual memory manager related operations implementation. 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) … … 89 89 90 90 // check ltid argument 91 assert( __FUNCTION__, (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)), 91 assert( __FUNCTION__, 92 (ltid <= ((CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE) / CONFIG_VMM_STACK_SIZE)), 92 93 "slot index %d too large for an user stack vseg", ltid ); 93 94 … … 107 108 if( vseg == NULL ) 108 109 { 109 // release lock protecting free lists 110 111 #if DEBUG_VMM_ERROR 112 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n", 113 __FUNCTION__ , local_cxy ); 114 #endif 110 115 busylock_release( &mgr->lock ); 111 112 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",113 __FUNCTION__ , local_cxy );114 115 116 return NULL; 116 117 } … … 346 347 if( current_vseg == NULL ) // return failure 347 348 { 348 // release lock protecting free lists 349 350 #if DEBUG_VMM_ERROR 351 printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n", 352 __FUNCTION__, npages , local_cxy ); 353 #endif 349 354 busylock_release( &mgr->lock ); 350 351 printk("\n[ERROR] %s cannot allocate ) %d page(s) in cluster %x\n",352 __FUNCTION__, npages , local_cxy );353 354 355 return NULL; 355 356 } … … 368 369 if( new_vseg == NULL ) 369 370 { 370 // release lock protecting free lists 371 372 #if DEBUG_VMM_ERROR 373 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n", 374 __FUNCTION__ , local_cxy ); 375 #endif 371 376 busylock_release( &mgr->lock ); 372 373 printk("\n[ERROR] %s cannot allocate memory for vseg in cluster %x\n",374 __FUNCTION__ , local_cxy );375 376 377 return NULL; 377 378 } … … 517 518 XPTR( local_cxy , &vseg->xlist ) ); 518 519 519 } // end vmm_attach_vseg_ from_vsl()520 } // end vmm_attach_vseg_to_vsl() 520 521 521 522 //////////////////////////////////////////////////////////////////////////////////////////// … … 537 538 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 538 539 539 } // end vmm_detach_ from_vsl()540 } // end vmm_detach_vseg_from_vsl() 540 541 541 542 //////////////////////////////////////////// … … 1290 1291 if( child_vseg == NULL ) // release all allocated vsegs 1291 1292 { 1293 1294 #if DEBUG_VMM_ERROR 1295 printk("\n[ERROR] in %s : cannot create vseg for child in cluster %x\n", 1296 __FUNCTION__, local_cxy ); 1297 #endif 1292 1298 vmm_destroy( child_process ); 1293 printk("\n[ERROR] in %s : cannot create vseg for child\n", __FUNCTION__ );1294 1299 return -1; 1295 1300 } … … 1338 1343 if( error ) 1339 1344 { 1345 1346 #if DEBUG_VMM_ERROR 1347 printk("\n[ERROR] in %s : cannot copy GPT\n", 1348 __FUNCTION__ ); 1349 #endif 1340 1350 vmm_destroy( child_process ); 1341 printk("\n[ERROR] in %s : cannot copy GPT\n", __FUNCTION__ );1342 1351 return -1; 1343 1352 } … … 1357 1366 remote_queuelock_release( parent_lock_xp ); 1358 1367 1359 /* deprecated [AG] : this is already done by the vmm_user_init() funcfion1360 1361 // initialize the child VMM STACK allocator1362 vmm_stack_init( child_vmm );1363 1364 // initialize the child VMM MMAP allocator1365 vmm_mmap_init( child_vmm );1366 1367 // initialize instrumentation counters1368 child_vmm->false_pgfault_nr = 0;1369 child_vmm->local_pgfault_nr = 0;1370 child_vmm->global_pgfault_nr = 0;1371 child_vmm->false_pgfault_cost = 0;1372 child_vmm->local_pgfault_cost = 0;1373 child_vmm->global_pgfault_cost = 0;1374 
*/1375 1368 // copy base addresses from parent VMM to child VMM 1376 1369 child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base)); … … 1564 1557 if( vseg == NULL ) 1565 1558 { 1566 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1567 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1559 1560 #if DEBUG_VMM_ERROR 1561 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1562 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1563 #endif 1568 1564 return NULL; 1569 1565 } … … 1572 1568 vseg->type = type; 1573 1569 vseg->vmm = vmm; 1574 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ SHIFT;1575 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ SHIFT);1570 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER; 1571 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER); 1576 1572 vseg->cxy = cxy; 1577 1573 … … 1582 1578 { 1583 1579 // compute page index (in mapper) for first and last byte 1584 vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_ SHIFT;1585 vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ SHIFT;1580 vpn_t vpn_min = file_offset >> CONFIG_PPM_PAGE_ORDER; 1581 vpn_t vpn_max = (file_offset + size - 1) >> CONFIG_PPM_PAGE_ORDER; 1586 1582 1587 1583 // compute offset in first page and number of pages … … 1594 1590 if( vseg == NULL ) 1595 1591 { 1596 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1597 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1592 1593 #if DEBUG_VMM_ERROR 1594 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1595 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1596 #endif 1598 1597 return NULL; 1599 1598 } … … 1602 1601 vseg->type = type; 1603 1602 vseg->vmm = vmm; 1604 vseg->min = (vseg->vpn_base << CONFIG_PPM_PAGE_ SHIFT) + offset;1603 vseg->min = (vseg->vpn_base << CONFIG_PPM_PAGE_ORDER) + offset; 1605 1604 vseg->max = vseg->min + size; 1606 1605 vseg->file_offset = file_offset; … … 1615 1614 { 1616 1615 // compute number of required pages in virtual space 1617 vpn_t npages = size >> CONFIG_PPM_PAGE_ SHIFT;1616 vpn_t npages = size >> CONFIG_PPM_PAGE_ORDER; 1618 1617 if( size & CONFIG_PPM_PAGE_MASK) npages++; 1619 1618 … … 1623 1622 if( vseg == NULL ) 1624 1623 { 1625 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1626 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1624 1625 #if DEBUG_VMM_ERROR 1626 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1627 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1628 #endif 1627 1629 return NULL; 1628 1630 } … … 1631 1633 vseg->type = type; 1632 1634 vseg->vmm = vmm; 1633 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ SHIFT;1634 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ SHIFT);1635 vseg->min = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER; 1636 vseg->max = vseg->min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER); 1635 1637 vseg->cxy = cxy; 1636 1638 … … 1640 1642 else // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg 1641 1643 { 1642 uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ SHIFT;1643 uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ SHIFT;1644 uint32_t vpn_min = base >> CONFIG_PPM_PAGE_ORDER; 1645 uint32_t vpn_max = (base + size - 1) >> CONFIG_PPM_PAGE_ORDER; 1644 1646 1645 1647 // allocate vseg descriptor … … 1648 1650 if( vseg == NULL ) 1649 1651 { 1650 printk("\n[ERROR] 
%s cannot create %s vseg for process %x in cluster %x\n", 1651 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1652 1653 #if DEBUG_VMM_ERROR 1654 printk("\n[ERROR] %s cannot create %s vseg for process %x in cluster %x\n", 1655 __FUNCTION__ , vseg_type_str( type ) , process->pid , local_cxy ); 1656 #endif 1652 1657 return NULL; 1653 1658 } 1659 1654 1660 // initialize vseg 1655 1661 vseg->type = type; … … 1657 1663 vseg->min = base; 1658 1664 vseg->max = base + size; 1659 vseg->vpn_base = base >> CONFIG_PPM_PAGE_ SHIFT;1665 vseg->vpn_base = base >> CONFIG_PPM_PAGE_ORDER; 1660 1666 vseg->vpn_size = vpn_max - vpn_min + 1; 1661 1667 vseg->file_offset = file_offset; … … 1672 1678 if( existing_vseg != NULL ) 1673 1679 { 1674 printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n" 1675 " overlap existing vseg %s [vpn_base %x / vpn_size %x]\n", 1676 __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 1677 vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size ); 1680 1681 #if DEBUG_VMM_ERROR 1682 printk("\n[ERROR] in %s for process %x : new vseg %s [vpn_base %x / vpn_size %x]\n" 1683 " overlap existing vseg %s [vpn_base %x / vpn_size %x]\n", 1684 __FUNCTION__ , process->pid, vseg_type_str(vseg->type), vseg->vpn_base, vseg->vpn_size, 1685 vseg_type_str(existing_vseg->type), existing_vseg->vpn_base, existing_vseg->vpn_size ); 1686 #endif 1678 1687 vseg_free( vseg ); 1679 1688 return NULL; … … 1801 1810 if( do_kmem_release ) 1802 1811 { 1803 kmem_req_t req; 1804 req.type = KMEM_PPM; 1805 req.ptr = GET_PTR( ppm_ppn2base( ppn ) ); 1806 1807 kmem_remote_free( page_cxy , &req ); 1812 // get physical page order 1813 uint32_t order = CONFIG_PPM_PAGE_ORDER + 1814 hal_remote_l32( XPTR( page_cxy , &page_ptr->order )); 1815 1816 // get physical page base 1817 void * base = GET_PTR( ppm_ppn2base( ppn ) ); 1818 1819 // release physical page 1820 kmem_remote_free( page_cxy , base , order ); 1808 1821 1809 1822 #if DEBUG_VMM_PPN_RELEASE … … 1855 1868 #endif 1856 1869 1857 // loop on PTEs in GPT to unmap all mapped PTE1858 1870 // the loop on PTEs in GPT to unmap all mapped PTEs 1871 for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) 1859 1872 { 1860 1873 // get ppn and attr … … 1942 1955 intptr_t min = new_base; 1943 1956 intptr_t max = new_base + new_size; 1944 vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_ SHIFT;1945 vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_ SHIFT;1957 vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_ORDER; 1958 vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_ORDER; 1946 1959 1947 1960 // build extended pointer on GPT … … 2082 2095 if( ref_cxy == local_cxy ) // local is ref => return error 2083 2096 { 2084 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 2085 __FUNCTION__, vaddr, process->pid ); 2086 2087 // release local VSL lock 2097 2098 #if DEBUG_VMM_ERROR 2099 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 2100 __FUNCTION__, vaddr, process->pid ); 2101 #endif 2088 2102 remote_queuelock_release( loc_lock_xp ); 2089 2090 2103 return -1; 2091 2104 } … … 2103 2116 if( ref_vseg == NULL ) // vseg not found => return error 2104 2117 { 2105 // release both VSL locks 2118 2119 #if DEBUG_VMM_ERROR 2120 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 2121 __FUNCTION__, vaddr, process->pid ); 2122 #endif 2106 2123 remote_queuelock_release( loc_lock_xp ); 2107 2124 remote_queuelock_release( ref_lock_xp ); 2108 2109 printk("\n[ERROR] in %s 
: vaddr %x in process %x out of segment\n",2110 __FUNCTION__, vaddr, process->pid );2111 2112 2125 return -1; 2113 2126 } … … 2119 2132 if( loc_vseg == NULL ) // no memory => return error 2120 2133 { 2121 printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n", 2122 __FUNCTION__, vaddr, process->pid ); 2123 2124 // release both VSL locks 2134 2135 #if DEBUG_VMM_ERROR 2136 printk("\n[ERROR] in %s : vaddr %x in process %x / no memory\n", 2137 __FUNCTION__, vaddr, process->pid ); 2138 #endif 2125 2139 remote_queuelock_release( ref_lock_xp ); 2126 2140 remote_queuelock_release( loc_lock_xp ); 2127 2128 2141 return -1; 2129 2142 } … … 2158 2171 ////////////////////////////////////////////////////////////////////////////////////// 2159 2172 // This static function compute the target cluster to allocate a physical page 2160 // for a given <vpn> in a given <vseg>, allocates the page and returns an extended 2161 // pointer on the allocated page descriptor. 2173 // for a given <vpn> in a given <vseg>, allocates the physical page from a local 2174 // or remote cluster (depending on the vseg type), and returns an extended pointer 2175 // on the allocated page descriptor. 2162 2176 // The vseg cannot have the FILE type. 2163 2177 ////////////////////////////////////////////////////////////////////////////////////// 2164 2178 // @ vseg : local pointer on vseg. 2165 2179 // @ vpn : unmapped vpn. 2166 // @ return an extended pointer on the allocated page descriptor.2180 // @ return xptr on page descriptor if success / return XPTR_NULL if failure 2167 2181 ////////////////////////////////////////////////////////////////////////////////////// 2168 2182 static xptr_t vmm_page_allocate( vseg_t * vseg, … … 2207 2221 } 2208 2222 2209 // allocate one small physical page from target cluster2210 kmem_req_t req;2211 req.type = KMEM_PPM;2212 req.order = 0;2213 req.flags = AF_ZERO;2214 2215 2223 // get local pointer on page base 2216 void * ptr = kmem_remote_alloc( page_cxy , &req ); 2217 2224 void * ptr = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO ); 2225 2226 if( ptr == NULL ) 2227 { 2228 2229 #if DEBUG_VMM_ERROR 2230 printk("\n[ERROR] in %s : cannot allocate memory from cluster %x\n", 2231 __FUNCTION__, page_cxy ); 2232 #endif 2233 return XPTR_NULL; 2234 } 2218 2235 // get extended pointer on page descriptor 2219 2236 page_xp = ppm_base2page( XPTR( page_cxy , ptr ) ); … … 2291 2308 2292 2309 // compute missing page offset in vseg 2293 uint32_t offset = page_id << CONFIG_PPM_PAGE_ SHIFT;2310 uint32_t offset = page_id << CONFIG_PPM_PAGE_ORDER; 2294 2311 2295 2312 // compute missing page offset in .elf file … … 2427 2444 // get local vseg (access to reference VSL can be required) 2428 2445 error = vmm_get_vseg( process, 2429 (intptr_t)vpn<<CONFIG_PPM_PAGE_ SHIFT,2446 (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER, 2430 2447 &vseg ); 2431 2448 if( error ) … … 2752 2769 // get local vseg 2753 2770 error = vmm_get_vseg( process, 2754 (intptr_t)vpn<<CONFIG_PPM_PAGE_ SHIFT,2771 (intptr_t)vpn<<CONFIG_PPM_PAGE_ORDER, 2755 2772 &vseg ); 2756 2773 if( error ) -
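Several vmm.c paths above switch from the old kmem_req_t request descriptor to the new direct kmem interface, e.g. kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO ) in vmm_page_allocate() and kmem_remote_free( page_cxy , base , order ) on the release path. The fragment below is a hedged sketch of that convention rather than code taken from the changeset: the helper name is hypothetical, and it only assumes the prototypes visible in the diff (NULL return on allocation failure, ppm_base2page() to recover the page descriptor).

/* hypothetical helper, shown only to illustrate the new kmem calling convention */
static xptr_t alloc_zeroed_page_in( cxy_t page_cxy )
{
    // allocate one zero-filled small page in the target cluster
    void * base = kmem_remote_alloc( page_cxy , CONFIG_PPM_PAGE_ORDER , AF_ZERO );

    // report failure with a null extended pointer, as the changeset does
    if( base == NULL ) return XPTR_NULL;

    // return an extended pointer on the page descriptor
    return ppm_base2page( XPTR( page_cxy , base ) );
}

/* symmetric release : same cluster, same base, same order, e.g. */
/* kmem_remote_free( page_cxy , base , CONFIG_PPM_PAGE_ORDER );  */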
trunk/kernel/mm/vseg.c
r672 r683 62 62 vseg_t * vseg_alloc( void ) 63 63 { 64 kmem_req_t req; 65 66 req.type = KMEM_KCM; 67 req.order = bits_log2( sizeof(vseg_t) ); 68 req.flags = AF_KERNEL | AF_ZERO; 69 70 return kmem_alloc( &req ); 64 return (vseg_t*)kmem_alloc( bits_log2( sizeof(vseg_t)) , AF_ZERO ); 71 65 } 72 66 … … 74 68 void vseg_free( vseg_t * vseg ) 75 69 { 76 kmem_req_t req; 77 78 req.type = KMEM_KCM; 79 req.ptr = vseg; 80 kmem_free( &req ); 70 kmem_free( vseg , bits_log2( sizeof(vseg_t)) ); 81 71 } 82 72 -
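The vseg.c hunk shows the same kmem simplification applied to the vseg descriptor allocator: the KMEM_KCM request structure disappears and the block order is passed directly, which makes the allocate/free pairing explicit. The snippet below repeats that pattern on a made-up foo_t descriptor purely for illustration; only kmem_alloc(), kmem_free() and bits_log2() are taken from the changeset.

/* illustrative descriptor type, not part of the kernel */
typedef struct foo_s { uint32_t a; uint32_t b; } foo_t;

static foo_t * foo_alloc( void )
{
    /* order passed directly, block returned zero-filled */
    return (foo_t *)kmem_alloc( bits_log2( sizeof(foo_t) ) , AF_ZERO );
}

static void foo_free( foo_t * foo )
{
    /* the order given to kmem_free() must match the one used by kmem_alloc() */
    kmem_free( foo , bits_log2( sizeof(foo_t) ) );
}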
trunk/kernel/mm/vseg.h
r657 r683 82 82 vpn_t vpn_base; /*! first page of vseg */ 83 83 vpn_t vpn_size; /*! number of pages occupied */ 84 xptr_t mapper_xp; /*! xptr on remote mapper (for types CODE/DATA/FILE) */ 84 85 uint32_t flags; /*! vseg attributes */ 85 xptr_t mapper_xp; /*! xptr on remote mapper (for types CODE/DATA/FILE) */86 86 intptr_t file_offset; /*! vseg offset in file (for types CODE/DATA/FILE) */ 87 87 intptr_t file_size; /*! max segment size in mapper (for type CODE/DATA) */
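The vseg.h hunk only moves the mapper_xp field next to the vpn fields and extends the copyright line, but it is a convenient place to recall how a vseg's byte-address range derives from vpn_base and vpn_size under the new CONFIG_PPM_PAGE_ORDER naming (the same computation appears in vmm_create_vseg() above). The helper below is illustrative and not part of the changeset.

/* illustrative only : address-range test derived from the vpn fields */
static inline int vseg_contains( vseg_t * vseg , intptr_t vaddr )
{
    intptr_t min = vseg->vpn_base << CONFIG_PPM_PAGE_ORDER;
    intptr_t max = min + (vseg->vpn_size << CONFIG_PPM_PAGE_ORDER);

    return (vaddr >= min) && (vaddr < max);
}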