Changeset 632 for trunk/kernel/mm
- Timestamp: May 28, 2019, 2:56:04 PM (6 years ago)
- Location: trunk/kernel/mm
- Files: 6 edited
trunk/kernel/mm/page.c
(r567 → r632)

 }

-////////////////////////////////
-void page_print( page_t * page )
-{
-    printk("*** Page %d : base = %x / flags = %x / order = %d / count = %d\n",
-           page->index,
-           GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) ),
-           page->flags,
-           page->order,
-           page->refcount );
-}
+////////////////////////////////////////////////////
+inline void page_remote_set_flag( xptr_t   page_xp,
+                                  uint32_t value )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_or( XPTR( page_cxy , &page_ptr->flags ) , value );
+}
+
+//////////////////////////////////////////////////////
+inline void page_remote_clear_flag( xptr_t   page_xp,
+                                    uint32_t value )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_and( XPTR( page_cxy , &page_ptr->flags ) , ~value );
+}
+
+/////////////////////////////////////////////////////
+inline bool_t page_remote_is_flag( xptr_t   page_xp,
+                                   uint32_t value )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    uint32_t flags = hal_remote_l32( XPTR( page_cxy , &page_ptr->flags ) );
+
+    return (flags & value) ? 1 : 0;
+}
+
+/////////////////////////////////////////////////////
+inline void page_remote_refcount_up( xptr_t page_xp )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->refcount ) , 1 );
+}
+
+///////////////////////////////////////////////////////
+inline void page_remote_refcount_down( xptr_t page_xp )
+{
+    cxy_t    page_cxy = GET_CXY( page_xp );
+    page_t * page_ptr = GET_PTR( page_xp );
+
+    hal_remote_atomic_add( XPTR( page_cxy , &page_ptr->refcount ) , -1 );
+}
+
+///////////////////////////////////////////
+void page_remote_display( xptr_t page_xp )
+{
+    page_t page;   // local copy of page descriptor
+
+    hal_remote_memcpy( XPTR( local_cxy , &page ) , page_xp , sizeof( page_t ) );
+
+    printk("*** Page %d in cluster %x : ppn %x / flags %x / order %d / refcount %d\n",
+           page.index,
+           GET_CXY( page_xp ),
+           ppm_page2ppn( page_xp ),
+           page.flags,
+           page.order,
+           page.refcount );
+}
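The new remote accessors all follow the same pattern: split the extended pointer into a cluster identifier and a local pointer, then perform an atomic read-modify-write through the HAL. The flag semantics themselves are plain bit-mask arithmetic; the standalone user-space sketch below illustrates them with C11 atomics. The PG_* values here are hypothetical, and the kernel versions of course go through hal_remote_atomic_or/and on extended pointers rather than on a local word. Note in particular that clearing uses AND with the complement of the mask, which is why page_remote_clear_flag applies ~value.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PG_FREE      0x01   // hypothetical flag values, for illustration only
    #define PG_RESERVED  0x02
    #define PG_DIRTY     0x04

    int main( void )
    {
        _Atomic uint32_t flags = 0;

        // set : atomic OR with the mask (what page_remote_set_flag does remotely)
        atomic_fetch_or( &flags , PG_FREE | PG_DIRTY );

        // clear : atomic AND with the complement of the mask; AND with the mask
        // itself would instead keep only those bits
        atomic_fetch_and( &flags , ~(uint32_t)PG_DIRTY );

        // test : plain read, then mask (what page_remote_is_flag does)
        uint32_t v = atomic_load( &flags );
        printf("PG_FREE  : %d\n", (v & PG_FREE ) ? 1 : 0);   // prints 1
        printf("PG_DIRTY : %d\n", (v & PG_DIRTY) ? 1 : 0);   // prints 0
        return 0;
    }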
trunk/kernel/mm/page.h
(r625 → r632)

  * Authors  Ghassan Almalles (2008,2009,2010,2011,2012)
- *          Alain Greiner    (2016)
+ *          Alain Greiner    (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

...

 /*************************************************************************************
- * This function initializes one page descriptor.
+ * This function must be called by a thread running in the local cluster.
+ * It initializes the page descriptor.
  *************************************************************************************
  * @ page    : pointer to page descriptor
  ************************************************************************************/

...

 /*************************************************************************************
- * This function atomically set one or several flags in page descriptor flags.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically sets one or several flags in the page descriptor flags.
  *************************************************************************************
  * @ page    : pointer to page descriptor.

...

 /*************************************************************************************
- * This function atomically reset one or several flags in page descriptor flags.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically resets one or several flags in the page descriptor flags.
  *************************************************************************************
  * @ page    : pointer to page descriptor.

...

 /*************************************************************************************
- * This function tests the value of one or several flags in page descriptor flags.
+ * This function must be called by a thread running in the local cluster.
+ * It tests the value of one or several flags in the page descriptor flags.
  *************************************************************************************
  * @ page    : pointer to page descriptor.

...

 /*************************************************************************************
- * This function resets to 0 all bytes in a given page.
+ * This function must be called by a thread running in the local cluster.
+ * It resets to 0 all bytes in a given page.
  *************************************************************************************
  * @ page    : pointer on page descriptor.

...

 /*************************************************************************************
- * This blocking function atomically increments the page refcount.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically increments the page refcount.
  *************************************************************************************
  * @ page    : pointer on page descriptor.

...

 /*************************************************************************************
- * This blocking function atomically decrements the page refcount.
+ * This function must be called by a thread running in the local cluster.
+ * It atomically decrements the page refcount.
  *************************************************************************************
  * @ page    : pointer on page descriptor.
  ************************************************************************************/
 inline void page_refcount_down( page_t * page );

-/*************************************************************************************
- * This function display the values contained in a page descriptor.
- *************************************************************************************
- * @ page    : pointer on page descriptor.
- ************************************************************************************/
-void page_print( page_t * page );
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically sets one or several flags in a remote page descriptor
+ * identified by the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp  : extended pointer to page descriptor.
+ * @ value    : all non zero bits in value will be set.
+ ************************************************************************************/
+inline void page_remote_set_flag( xptr_t   page_xp,
+                                  uint32_t value );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically resets one or several flags in a remote page descriptor
+ * identified by the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp  : extended pointer to page descriptor.
+ * @ value    : all non zero bits in value will be cleared.
+ ************************************************************************************/
+inline void page_remote_clear_flag( xptr_t   page_xp,
+                                    uint32_t value );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It tests the value of one or several flags in a remote page descriptor
+ * identified by the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp  : extended pointer to page descriptor.
+ * @ value    : all non zero bits will be tested.
+ * @ returns true if at least one non zero bit in value is set / false otherwise.
+ ************************************************************************************/
+inline bool_t page_remote_is_flag( xptr_t   page_xp,
+                                   uint32_t value );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically increments the refcount for the remote page identified by
+ * the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp  : extended pointer on page descriptor.
+ ************************************************************************************/
+inline void page_remote_refcount_up( xptr_t page_xp );
+
+/*************************************************************************************
+ * This function can be called by any thread running in any cluster.
+ * It atomically decrements the refcount for the remote page identified by
+ * the <page_xp> argument.
+ *************************************************************************************
+ * @ page_xp  : extended pointer on page descriptor.
+ ************************************************************************************/
+inline void page_remote_refcount_down( xptr_t page_xp );
+
+/*************************************************************************************
+ * This debug function can be called by any thread running in any cluster.
+ * It displays the values contained in a page descriptor.
+ *************************************************************************************
+ * @ page_xp  : extended pointer on page descriptor.
+ ************************************************************************************/
+void page_remote_display( xptr_t page_xp );

 #endif  /* _PAGE_H_ */
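All of these prototypes take an xptr_t, the ALMOS-MKH extended pointer that names a (cluster, local pointer) pair. As a rough illustration only, the sketch below packs a 32-bit cluster identifier and a 32-bit local address into a 64-bit word; the real XPTR / GET_CXY / GET_PTR macros are provided by the HAL and their actual encoding may differ.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t xptr_t;   // extended pointer : (cluster id , local pointer)
    typedef uint32_t cxy_t;    // cluster identifier

    // Plausible 32/32 packing, for illustration; the kernel macros may differ.
    static xptr_t XPTR   ( cxy_t cxy , void * ptr )
    { return ((xptr_t)cxy << 32) | (uint32_t)(uintptr_t)ptr; }
    static cxy_t  GET_CXY( xptr_t xp ) { return (cxy_t)(xp >> 32); }
    static void * GET_PTR( xptr_t xp ) { return (void *)(uintptr_t)(uint32_t)xp; }

    int main( void )
    {
        uint32_t flags  = 0;
        cxy_t    my_cxy = 0x11;                  // hypothetical cluster id
        xptr_t   xp     = XPTR( my_cxy , &flags );

        printf("cxy = %x / ptr = %p\n", GET_CXY( xp ), GET_PTR( xp ));
        return 0;
    }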
trunk/kernel/mm/ppm.c
(r625 → r632)

  * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
- *          Alain Greiner    (2016,2017,2018)
+ *          Alain Greiner    (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

...

 ////////////////////////////////////////////////////////////////////////////////////////

-////////////////////////////////////////////////
-inline bool_t ppm_page_is_valid( page_t * page )
-{
-    ppm_t    * ppm  = &LOCAL_CLUSTER->ppm;
-    uint32_t   pgnr = (uint32_t)( page - ppm->pages_tbl );
-    return (pgnr <= ppm->pages_nr);
-}

 /////////////////////////////////////////////

...

 void ppm_free_pages_nolock( page_t * page )
 {
-    page_t   * buddy;          // searched buddy page descriptor
-    uint32_t   buddy_index;    // buddy page index
-    page_t   * current;        // current (merged) page descriptor
-    uint32_t   current_index;  // current (merged) page index
-    uint32_t   current_order;  // current (merged) page order
+    page_t   * buddy;          // searched buddy block page descriptor
+    uint32_t   buddy_index;    // buddy block index in page_tbl[]
+    page_t   * current;        // current (merged) block page descriptor
+    uint32_t   current_index;  // current (merged) block index in page_tbl[]
+    uint32_t   current_order;  // current (merged) block order

     ppm_t    * ppm       = &LOCAL_CLUSTER->ppm;
     page_t   * pages_tbl = ppm->pages_tbl;

     assert( !page_is_flag( page , PG_FREE ) ,
             "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

     assert( !page_is_flag( page , PG_RESERVED ) ,
             "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(local_cxy,page)) );

     // update released page descriptor flags
...
     // search the buddy page descriptor
     // - merge with current page descriptor if found
     // - exit to release the current page descriptor if not found
-    current       = page ,
+    current       = page;
     current_index = (uint32_t)(page - ppm->pages_tbl);
     for( current_order = page->order ;
          current_order < CONFIG_PPM_MAX_ORDER ;
          current_order++ )
     {
         buddy_index = current_index ^ (1 << current_order);
         buddy       = pages_tbl + buddy_index;

-        if( !page_is_flag( buddy , PG_FREE ) || (buddy->order != current_order) ) break;
-
-        // remove buddy from free list
+        // exit this loop if buddy block not found
+        if( !page_is_flag( buddy , PG_FREE ) ||
+            (buddy->order != current_order) ) break;
+
+        // remove buddy block from free_list
         list_unlink( &buddy->list );
         ppm->free_pages_nr[current_order] --;

-        // merge buddy with current
+        // reset order field in buddy block page descriptor
         buddy->order = 0;
+
+        // compute merged block index in page_tbl[]
         current_index &= buddy_index;
     }

-    // update merged page descriptor order
+    // update pointer and order field for merged block page descriptor
     current        = pages_tbl + current_index;
     current->order = current_order;

-    // insert current in free list
+    // insert merged block in free list
     list_add_first( &ppm->free_pages_root[current_order] , &current->list );
     ppm->free_pages_nr[current_order] ++;

...

 ////////////////////////////////////////////
 page_t * ppm_alloc_pages( uint32_t order )
 {
+    page_t   * current_block;
     uint32_t   current_order;
-    page_t   * remaining_block;
     uint32_t   current_size;
+    page_t   * found_block;

 #if DEBUG_PPM_ALLOC_PAGES
     thread_t * this  = CURRENT_THREAD;
     uint32_t   cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_PPM_ALLOC_PAGES < cycle )
-        printk("\n[%s] thread[%x,%x] enter for %d page(s) / cycle %d\n",
-               __FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle );
+        printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
 #endif

 #if(DEBUG_PPM_ALLOC_PAGES & 0x1)
     if( DEBUG_PPM_ALLOC_PAGES < cycle )
-        ppm_print("enter ppm_alloc_pages");
+        ppm_remote_display( local_cxy );
 #endif

...

     assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );

-    page_t * block = NULL;
+    // build extended pointer on lock protecting local PPM
+    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

     // take lock protecting free lists
-    busylock_acquire( &ppm->free_lock );
+    remote_busylock_acquire( lock_xp );
+
+    current_block = NULL;

     // find a free block equal or larger to requested size
     for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
     {
         if( !list_is_empty( &ppm->free_pages_root[current_order] ) )
         {
-            block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );
-            list_unlink( &block->list );
-            break;
+            // get first free block in this free_list
+            current_block = LIST_FIRST( &ppm->free_pages_root[current_order] , page_t , list );
+
+            // remove this block from this free_list
+            list_unlink( &current_block->list );
+
+            // register pointer on found block
+            found_block = current_block;
+
+            // update this free-list number of blocks
+            ppm->free_pages_nr[current_order] --;
+
+            // compute found block size
+            current_size = (1 << current_order);
+
+            break;
         }
     }

-    if( block == NULL )    // return failure
+    if( current_block == NULL )    // return failure if no free block found
     {
         // release lock protecting free lists
-        busylock_release( &ppm->free_lock );
+        remote_busylock_release( lock_xp );

 #if DEBUG_PPM_ALLOC_PAGES
         cycle = (uint32_t)hal_get_cycles();
         if( DEBUG_PPM_ALLOC_PAGES < cycle )
-            printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) / cycle %d\n",
-                   __FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle );
+            printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
+                   __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
 #endif

...
     }

-    // update free-lists after removing a block
-    ppm->free_pages_nr[current_order] --;
-    current_size = (1 << current_order);
-
-    // split the removed block in smaller sub-blocks if required
+    // split the found block in smaller sub-blocks if required
     // and update the free-lists accordingly
     while( current_order > order )
     {
         current_order --;
+
+        // update pointer, size, and order fields for new free block
         current_size >>= 1;
-
-        remaining_block = block + current_size;
-        remaining_block->order = current_order;
-
-        list_add_first( &ppm->free_pages_root[current_order] , &remaining_block->list );
+        current_block = found_block + current_size;
+        current_block->order = current_order;
+
+        // insert new free block in relevant free_list
+        list_add_first( &ppm->free_pages_root[current_order] , &current_block->list );
+
+        // update number of blocks in free list
         ppm->free_pages_nr[current_order] ++;
     }

-    // update page descriptor
-    page_clear_flag( block , PG_FREE );
-    page_refcount_up( block );
-    block->order = order;
+    // update found block page descriptor
+    page_clear_flag( found_block , PG_FREE );
+    page_refcount_up( found_block );
+    found_block->order = order;

     // release lock protecting free lists
-    busylock_release( &ppm->free_lock );
+    remote_busylock_release( lock_xp );

     // update DQDT
-    dqdt_increment_pages( order );
+    dqdt_increment_pages( local_cxy , order );

 #if DEBUG_PPM_ALLOC_PAGES
     cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_PPM_ALLOC_PAGES < cycle )
-        printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x / cycle %d\n",
+        printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n",
                __FUNCTION__, this->process->pid, this->trdid,
-               1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle );
+               1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
 #endif

 #if(DEBUG_PPM_ALLOC_PAGES & 0x1)
     if( DEBUG_PPM_ALLOC_PAGES < cycle )
-        ppm_print("exit ppm_alloc_pages");
-#endif
-
-    return block;
+        ppm_remote_display( local_cxy );
+#endif
+
+    return found_block;

 } // end ppm_alloc_pages()

...

 #if DEBUG_PPM_FREE_PAGES
-    uint32_t cycle = (uint32_t)hal_get_cycles();
+    thread_t * this  = CURRENT_THREAD;
+    uint32_t   cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_PPM_FREE_PAGES < cycle )
-        printk("\n[%s] thread[%x,%x] enter for %d page(s) / ppn %x / cycle %d\n",
+        printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
                __FUNCTION__, this->process->pid, this->trdid,
-               1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
+               1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
 #endif

 #if(DEBUG_PPM_FREE_PAGES & 0x1)
     if( DEBUG_PPM_FREE_PAGES < cycle )
-        ppm_print("enter ppm_free_pages");
-#endif
+        ppm_remote_display( local_cxy );
+#endif
+
+    // build extended pointer on lock protecting free_lists
+    xptr_t lock_xp = XPTR( local_cxy , &ppm->free_lock );

     // get lock protecting free_pages[] array
-    busylock_acquire( &ppm->free_lock );
+    remote_busylock_acquire( lock_xp );

     ppm_free_pages_nolock( page );

-    // release lock protecting free_pages[] array
-    busylock_release( &ppm->free_lock );
+    // release lock protecting free_lists
+    remote_busylock_release( lock_xp );

     // update DQDT
-    dqdt_decrement_pages( page->order );
+    dqdt_decrement_pages( local_cxy , page->order );

 #if DEBUG_PPM_FREE_PAGES
     cycle = (uint32_t)hal_get_cycles();
     if( DEBUG_PPM_FREE_PAGES < cycle )
-        printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn %x / cycle %d\n",
+        printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
                __FUNCTION__, this->process->pid, this->trdid,
-               1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
+               1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
 #endif

 #if(DEBUG_PPM_FREE_PAGES & 0x1)
     if( DEBUG_PPM_FREE_PAGES < cycle )
-        ppm_print("exit ppm_free_pages");
+        ppm_remote_display( local_cxy );
 #endif

 } // end ppm_free_pages()

+/////////////////////////////////////////////
+xptr_t ppm_remote_alloc_pages( cxy_t    cxy,
+                               uint32_t order )
+{
+    uint32_t   current_order;
+    uint32_t   current_size;
+    page_t   * current_block;
+    page_t   * found_block;
+
+#if DEBUG_PPM_ALLOC_PAGES
+    thread_t * this  = CURRENT_THREAD;
+    uint32_t   cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_PPM_ALLOC_PAGES < cycle )
+        printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
+#endif
+
+#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
+    if( DEBUG_PPM_ALLOC_PAGES < cycle )
+        ppm_remote_display( cxy );
+#endif
+
+    // check order
+    assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
+
+    // get local pointer on PPM (same in all clusters)
+    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
+
+    // build extended pointer on lock protecting remote PPM
+    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
+
+    // take lock protecting free lists in remote cluster
+    remote_busylock_acquire( lock_xp );
+
+    current_block = NULL;
+
+    // find in remote cluster a free block equal or larger to requested size
+    for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ )
+    {
+        // get local pointer on the root of relevant free_list in remote cluster
+        list_entry_t * root = &ppm->free_pages_root[current_order];
+
+        if( !list_remote_is_empty( cxy , root ) )
+        {
+            // get local pointer on first free page descriptor in remote cluster
+            current_block = LIST_REMOTE_FIRST( cxy , root , page_t , list );
+
+            // remove first free page from the free-list in remote cluster
+            list_remote_unlink( cxy , &current_block->list );
+
+            // register found block
+            found_block = current_block;
+
+            // decrement relevant free-list number of items in remote cluster
+            hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );
+
+            // compute found block size
+            current_size = (1 << current_order);
+
+            break;
+        }
+    }
+
+    if( current_block == NULL )    // return failure
+    {
+        // release lock protecting free lists
+        remote_busylock_release( lock_xp );
+
+#if DEBUG_PPM_ALLOC_PAGES
+        cycle = (uint32_t)hal_get_cycles();
+        if( DEBUG_PPM_ALLOC_PAGES < cycle )
+            printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n",
+                   __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
+#endif
+
+        return XPTR_NULL;
+    }
+
+    // split the found block in smaller sub-blocks if required
+    // and update the free-lists accordingly in remote cluster
+    while( current_order > order )
+    {
+        // update order, size, and local pointer for new free block
+        current_order --;
+        current_size >>= 1;
+        current_block = found_block + current_size;
+
+        // update new free block order field in remote cluster
+        hal_remote_s32( XPTR( cxy , &current_block->order ) , current_order );
+
+        // get local pointer on the root of the relevant free_list in remote cluster
+        list_entry_t * root = &ppm->free_pages_root[current_order];
+
+        // insert new free block in this free_list
+        list_remote_add_first( cxy , root , &current_block->list );
+
+        // update free-list number of items in remote cluster
+        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), 1 );
+    }
+
+    // update refcount, flags and order fields in found block remote page descriptor
+    page_remote_clear_flag( XPTR( cxy , found_block ) , PG_FREE );
+    page_remote_refcount_up( XPTR( cxy , found_block ) );
+    hal_remote_s32( XPTR( cxy , &found_block->order ) , order );
+
+    // release lock protecting free lists in remote cluster
+    remote_busylock_release( lock_xp );
+
+    // update DQDT page counter in remote cluster
+    dqdt_increment_pages( cxy , order );
+
+#if DEBUG_PPM_ALLOC_PAGES
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_PPM_ALLOC_PAGES < cycle )
+        printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x in cluster %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid,
+               1<<order, ppm_page2ppn(XPTR( cxy , found_block )), cxy, cycle );
+#endif
+
+#if(DEBUG_PPM_ALLOC_PAGES & 0x1)
+    if( DEBUG_PPM_ALLOC_PAGES < cycle )
+        ppm_remote_display( cxy );
+#endif
+
+    return XPTR( cxy , found_block );
+
+} // end ppm_remote_alloc_pages()
+
+//////////////////////////////////////////
+void ppm_remote_free_pages( cxy_t    cxy,
+                            page_t * page )
+{
+    xptr_t     page_xp;        // extended pointer on released page descriptor
+    uint32_t   order;          // released block order
+    page_t   * buddy_ptr;      // searched buddy block page descriptor
+    uint32_t   buddy_order;    // searched buddy block order
+    uint32_t   buddy_index;    // buddy block index in page_tbl[]
+    page_t   * current_ptr;    // current (merged) block page descriptor
+    uint32_t   current_index;  // current (merged) block index in page_tbl[]
+    uint32_t   current_order;  // current (merged) block order
+
+#if DEBUG_PPM_FREE_PAGES
+    thread_t * this  = CURRENT_THREAD;
+    uint32_t   cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_PPM_FREE_PAGES < cycle )
+        printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid,
+               1<<page->order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
+#endif
+
+#if(DEBUG_PPM_FREE_PAGES & 0x1)
+    if( DEBUG_PPM_FREE_PAGES < cycle )
+        ppm_remote_display( cxy );
+#endif
+
+    // build extended pointer on released page descriptor
+    page_xp = XPTR( cxy , page );
+
+    // get released page order
+    order = hal_remote_l32( XPTR( cxy , &page->order ) );
+
+    // get local pointer on PPM (same in all clusters)
+    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
+
+    // build extended pointer on lock protecting remote PPM
+    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
+
+    // get local pointer on remote PPM page_tbl[] array
+    page_t * pages_tbl = hal_remote_lpt( XPTR( cxy , &ppm->pages_tbl ) );
+
+    // get lock protecting free_pages in remote cluster
+    remote_busylock_acquire( lock_xp );
+
+    assert( !page_remote_is_flag( page_xp , PG_FREE ) ,
+            "page already released : ppn = %x\n" , ppm_page2ppn(XPTR(cxy,page)) );
+
+    assert( !page_remote_is_flag( page_xp , PG_RESERVED ) ,
+            "reserved page : ppn = %x\n" , ppm_page2ppn(XPTR(cxy,page)) );
+
+    // update released page descriptor flags
+    page_remote_set_flag( page_xp , PG_FREE );
+
+    // search the buddy page descriptor
+    // - merge with current page descriptor if found
+    // - exit to release the current page descriptor if not found
+    current_ptr   = page;
+    current_index = (uint32_t)(page - ppm->pages_tbl);
+    for( current_order = order ;
+         current_order < CONFIG_PPM_MAX_ORDER ;
+         current_order++ )
+    {
+        buddy_index = current_index ^ (1 << current_order);
+        buddy_ptr   = pages_tbl + buddy_index;
+
+        // get buddy block order
+        buddy_order = hal_remote_l32( XPTR( cxy , &buddy_ptr->order ) );
+
+        // exit loop if buddy block not found
+        if( !page_remote_is_flag( XPTR( cxy , buddy_ptr ) , PG_FREE ) ||
+            (buddy_order != current_order) ) break;
+
+        // remove buddy from free list in remote cluster
+        list_remote_unlink( cxy , &buddy_ptr->list );
+        hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , -1 );
+
+        // reset order field in buddy block page descriptor
+        hal_remote_s32( XPTR( cxy , &buddy_ptr->order ) , 0 );
+
+        // compute merged block index in page_tbl[] array
+        current_index &= buddy_index;
+    }
+
+    // update merged page descriptor order field
+    current_ptr = pages_tbl + current_index;
+    hal_remote_s32( XPTR( cxy , &current_ptr->order ) , current_order );
+
+    // insert merged block into relevant free list in remote cluster
+    list_remote_add_first( cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
+    hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ) , 1 );
+
+    // release lock protecting free_pages[] array
+    remote_busylock_release( lock_xp );
+
+    // update DQDT
+    dqdt_decrement_pages( cxy , order );
+
+#if DEBUG_PPM_FREE_PAGES
+    cycle = (uint32_t)hal_get_cycles();
+    if( DEBUG_PPM_FREE_PAGES < cycle )
+        printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid,
+               1<<order, cxy, ppm_page2ppn(XPTR(cxy , page)), cycle );
+#endif
+
+#if(DEBUG_PPM_FREE_PAGES & 0x1)
+    if( DEBUG_PPM_FREE_PAGES < cycle )
+        ppm_remote_display( cxy );
+#endif
+
+} // end ppm_remote_free_pages()

-////////////////////////
-void ppm_display( void )
+////////////////////////////////////
+void ppm_remote_display( cxy_t cxy )
 {
     uint32_t       order;
...
     ppm_t * ppm = &LOCAL_CLUSTER->ppm;

-    // get lock protecting free lists
-    busylock_acquire( &ppm->free_lock );
+    // build extended pointer on lock protecting remote PPM
+    xptr_t lock_xp = XPTR( cxy , &ppm->free_lock );
+
+    // get lock protecting free lists in remote cluster
+    remote_busylock_acquire( lock_xp );

-    printk("\n***** PPM in cluster %x / %d pages\n", local_cxy , ppm->pages_nr );
+    printk("\n***** PPM in cluster %x / %d pages\n", cxy ,
+           hal_remote_l32( XPTR( cxy , &ppm->pages_nr ) ) );

     for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
     {
-        printk("- order = %d / free_pages = %d\t: ",
-               order , ppm->free_pages_nr[order] );
-
-        LIST_FOREACH( &ppm->free_pages_root[order] , iter )
+        // get number of free pages for free_list[order] in remote cluster
+        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
+        printk("- order = %d / free_pages = %d\t: ", order , n );
+
+        LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
         {
             page = LIST_ELEMENT( iter , page_t , list );
...
     }

-    // release lock protecting free lists
-    busylock_release( &ppm->free_lock );
+    // release lock protecting free lists in remote cluster
+    remote_busylock_release( lock_xp );
 }

-///////////////////////////////////////
-error_t ppm_assert_order( ppm_t * ppm )
+////////////////////////////////
+error_t ppm_assert_order( void )
 {
     uint32_t       order;
     list_entry_t * iter;
     page_t       * page;
+
+    ppm_t * ppm = &LOCAL_CLUSTER->ppm;

     for( order=0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )

...

         hal_remote_s32( page_flags_xp , flags | PG_DIRTY );

-        // The PPM dirty list is a LOCAL list !!!
-        // We must update 4 pointers to insert a new page in this list.
-        // We can use the standard LIST API when the page is local,
-        // but we cannot use the standard API if the page is remote...
-
-        if( page_cxy == local_cxy )    // locally update the PPM dirty list
-        {
-            list_add_first( &ppm->dirty_root , &page_ptr->list );
-        }
-        else                           // remotely update the PPM dirty list
-        {
-            // get local and remote pointers on "root" list entry
-            list_entry_t * root    = &ppm->dirty_root;
-            xptr_t         root_xp = XPTR( page_cxy , root );
-
-            // get local and remote pointers on "page" list entry
-            list_entry_t * list    = &page_ptr->list;
-            xptr_t         list_xp = XPTR( page_cxy , list );
-
-            // get local and remote pointers on first dirty page
-            list_entry_t * dirt    = hal_remote_lpt( XPTR( page_cxy , &root->next ) );
-            xptr_t         dirt_xp = XPTR( page_cxy , dirt );
-
-            // set root.next, list.next, list.pred, curr.pred in remote cluster
-            hal_remote_spt( root_xp                    , list );
-            hal_remote_spt( list_xp                    , dirt );
-            hal_remote_spt( list_xp + sizeof(intptr_t) , root );
-            hal_remote_spt( dirt_xp + sizeof(intptr_t) , list );
-        }
+        // insert the page in the remote dirty list
+        list_remote_add_first( page_cxy , &ppm->dirty_root , &page_ptr->list );

         done = true;

...

         hal_remote_s32( page_flags_xp , flags & (~PG_DIRTY) );

-        // The PPM dirty list is a LOCAL list !!!
-        // We must update 4 pointers to remove a page from this list.
-        // We can use the standard LIST API when the page is local,
-        // but we cannot use the standard API if the page is remote...
-
-        if( page_cxy == local_cxy )    // locally update the PPM dirty list
-        {
-            list_unlink( &page_ptr->list );
-        }
-        else                           // remotely update the PPM dirty list
-        {
-            // get local and remote pointers on "page" list entry
-            list_entry_t * list    = &page_ptr->list;
-            xptr_t         list_xp = XPTR( page_cxy , list );
-
-            // get local and remote pointers on "next" page list entry
-            list_entry_t * next    = hal_remote_lpt( list_xp );
-            xptr_t         next_xp = XPTR( page_cxy , next );
-
-            // get local and remote pointers on "pred" page list entry
-            list_entry_t * pred    = hal_remote_lpt( list_xp + sizeof(intptr_t) );
-            xptr_t         pred_xp = XPTR( page_cxy , pred );
-
-            // set root.next, list.next, list.pred, curr.pred in remote cluster
-            hal_remote_spt( pred_xp                    , next );
-            hal_remote_spt( list_xp                    , NULL );
-            hal_remote_spt( list_xp + sizeof(intptr_t) , NULL );
-            hal_remote_spt( next_xp + sizeof(intptr_t) , pred );
-        }
+        // remove the page from remote dirty list
+        list_remote_unlink( page_cxy , &page_ptr->list );

         done = true;
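Both free paths rely on the same buddy arithmetic: the buddy of a block of order k at index i in pages_tbl[] sits at index i XOR 2^k, and when the two merge, the merged block starts at the lower of the two indexes, which is exactly i AND buddy_index (it clears the order bit). A standalone sketch of that arithmetic, using a hypothetical index:

    #include <stdint.h>
    #include <stdio.h>

    // Illustration of the buddy index computations used in
    // ppm_free_pages_nolock() and ppm_remote_free_pages(): indexes are
    // positions in pages_tbl[], and a block of order k covers 2^k pages.
    int main( void )
    {
        uint32_t current_index = 44;   // hypothetical released block of order 2
        uint32_t current_order = 2;

        // buddy of block 44..47 is block 40..43
        uint32_t buddy_index = current_index ^ (1 << current_order);   // 44 ^ 4 = 40
        printf("buddy of %u (order %u) is %u\n",
               current_index, current_order, buddy_index);

        // if the buddy is free with the same order, the two blocks merge:
        // the merged block starts at the lower index
        uint32_t merged_index = current_index & buddy_index;           // 40
        uint32_t merged_order = current_order + 1;                     // covers 40..47
        printf("merged block : index %u / order %u\n", merged_index, merged_order);

        return 0;
    }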
trunk/kernel/mm/ppm.h
(r625 → r632)

  *
  * The main service provided by the PMM is the dynamic allocation of physical pages
- * from the "kernel_heap" section. This low-level allocator implements the buddy
- * algorithm: an allocated block is an integer number n of small pages, where n
- * is a power of 2, and ln(n) is called order.
- * This allocator being shared by the local threads, the free_page lists rooted
- * in the PPM descriptor are protected by a local busylock, because it is used
- * by the idle_thread during kernel_init().
- *
- * Another service is to register the dirty pages in a specific dirty_list, that is
+ * from the "kernel_heap" section. This low-level allocator implements the "buddy"
+ * algorithm: an allocated block is an integer number n of small pages, where n
+ * is a power of 2, and ln(n) is called order. The free_pages_root[] array contains
+ * the roots of the local lists of free pages for the different sizes, as required
+ * by the "buddy" algorithm.
+ * The local threads can access these free_lists by calling the ppm_alloc_pages()
+ * and ppm_free_pages() functions, and the remote threads can access the same free
+ * lists by calling the ppm_remote_alloc_pages() and ppm_remote_free_pages()
+ * functions. Therefore, these free lists are protected by a remote_busylock.
+ *
+ * Another service is to register the dirty pages in a specific local dirty_list,
  * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk.
  * This dirty list is protected by a specific remote_queuelock, because it can be
- * modified by a remote thread, but it contains only local pages.
+ * modified by a remote thread.
  ****************************************************************************************/

 typedef struct ppm_s
 {
-    busylock_t          free_lock;          /*! lock protecting free_pages[] lists     */
+    remote_busylock_t   free_lock;          /*! lock protecting free_pages[] lists     */
     list_entry_t        free_pages_root[CONFIG_PPM_MAX_ORDER];  /*! roots of free lists */
     uint32_t            free_pages_nr[CONFIG_PPM_MAX_ORDER];    /*! free pages number   */

...

 /*****************************************************************************************
- * This is the low-level physical pages allocation function.
- * It allocates N contiguous physical pages. N is a power of 2.
- * In normal use, it should not be called directly, as the recommended way to get
- * physical pages is to call the generic allocator defined in kmem.h.
- *****************************************************************************************
- * @ order     : ln2( number of 4 Kbytes pages)
- * @ returns a pointer on the page descriptor if success / NULL otherwise
+ * This local allocator must be called by a thread running in the local cluster.
+ * It allocates n contiguous physical 4 Kbytes pages from the local cluster, where
+ * n is a power of 2 defined by the <order> argument.
+ * In normal use, it should not be called directly, as the recommended way to allocate
+ * physical pages is to call the generic allocator defined in kmem.h.
+ *****************************************************************************************
+ * @ order     : ln2( number of 4 Kbytes pages)
+ * @ returns a local pointer on the page descriptor if success / NULL if error.
  ****************************************************************************************/
 page_t * ppm_alloc_pages( uint32_t order );

 /*****************************************************************************************
- * This is the low-level physical pages release function. It takes the lock protecting
- * the free_list before register the released page in the relevant free_list.
+ * This function must be called by a thread running in the local cluster to release
+ * physical pages. It takes the lock protecting the free_lists before registering the
+ * released page in the relevant free_list.
  * In normal use, you do not need to call it directly, as the recommended way to free
  * physical pages is to call the generic allocator defined in kmem.h.
  *****************************************************************************************
- * @ page      : pointer to the page descriptor to be released
+ * @ page      : local pointer on the page descriptor to be released
  ****************************************************************************************/
 void ppm_free_pages( page_t * page );

...
  * there is no concurrent access issue.
  *****************************************************************************************
- * @ page      : pointer to the page descriptor to be released
+ * @ page      : local pointer on the page descriptor to be released
  ****************************************************************************************/
 void ppm_free_pages_nolock( page_t * page );

-/*****************************************************************************************
- * This function check if a page descriptor pointer is valid.
- *****************************************************************************************
- * @ page      : pointer on a page descriptor
- * @ returns true if valid / false otherwise.
- ****************************************************************************************/
-inline bool_t ppm_page_is_valid( page_t * page );
+/*****************************************************************************************
+ * This remote allocator can be called by any thread running in any cluster.
+ * It allocates n contiguous physical 4 Kbytes pages from the cluster identified
+ * by the <cxy> argument, where n is a power of 2 defined by the <order> argument.
+ * In normal use, it should not be called directly, as the recommended way to allocate
+ * physical pages is to call the generic allocator defined in kmem.h.
+ *****************************************************************************************
+ * @ cxy       : remote cluster identifier.
+ * @ order     : ln2( number of 4 Kbytes pages)
+ * @ returns an extended pointer on the page descriptor if success / XPTR_NULL if error.
+ ****************************************************************************************/
+xptr_t ppm_remote_alloc_pages( cxy_t    cxy,
+                               uint32_t order );
+
+/*****************************************************************************************
+ * This function can be called by any thread running in any cluster to release physical
+ * pages to a remote cluster. It takes the lock protecting the free_lists before
+ * registering the released page in the relevant free_list.
+ * In normal use, you do not need to call it directly, as the recommended way to free
+ * physical pages is to call the generic allocator defined in kmem.h.
+ *****************************************************************************************
+ * @ cxy       : remote cluster identifier.
+ * @ page      : local pointer on the page descriptor to be released in remote cluster.
+ ****************************************************************************************/
+void ppm_remote_free_pages( cxy_t    cxy,
+                            page_t * page );
+
+/*****************************************************************************************
+ * This debug function can be called by any thread running in any cluster to display
+ * the current PPM state of a remote cluster.
+ *****************************************************************************************
+ * @ cxy       : remote cluster identifier.
+ ****************************************************************************************/
+void ppm_remote_display( cxy_t cxy );

...

 /*****************************************************************************************
- * This function prints the PPM allocator status in the calling thread cluster.
- *****************************************************************************************
- * @ string    : character string printed in header
- ****************************************************************************************/
-void ppm_display( void );
-
-/*****************************************************************************************
- * This function checks PPM allocator consistency.
- *****************************************************************************************
- * @ ppm       : pointer on PPM allocator.
+ * This function can be called by any thread running in any cluster.
+ * It displays the PPM allocator status in the cluster identified by the <cxy> argument.
+ *****************************************************************************************
+ * @ cxy       : remote cluster
+ ****************************************************************************************/
+void ppm_remote_display( cxy_t cxy );
+
+/*****************************************************************************************
+ * This function must be called by a thread running in the local cluster.
+ * It checks the consistency of the local PPM allocator.
+ *****************************************************************************************
  * @ return 0 if PPM is OK / return -1 if PPM not consistent.
  ****************************************************************************************/
-error_t ppm_assert_order( ppm_t * ppm );
+error_t ppm_assert_order( void );
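Both allocators take ln2 of the page count, not a byte count, so a caller has to convert a size to an order first. A minimal sketch of that conversion, assuming the 4 Kbytes small-page size used throughout the PPM (the bytes_to_order helper is illustrative, not part of the kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096   // small page size assumed by the PPM (4 Kbytes)

    // Returns the smallest order k such that (1 << k) pages cover <bytes>:
    // round the byte count up to whole pages, then up to a power of two.
    static uint32_t bytes_to_order( uint64_t bytes )
    {
        uint64_t pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
        uint32_t order = 0;
        while( ((uint64_t)1 << order) < pages ) order++;
        return order;
    }

    int main( void )
    {
        printf("%u\n", bytes_to_order( 4096 ));    // 0 -> one page
        printf("%u\n", bytes_to_order( 5000 ));    // 1 -> two pages
        printf("%u\n", bytes_to_order( 65536 ));   // 4 -> sixteen pages
        return 0;
    }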
trunk/kernel/mm/vmm.c
(r630 → r632)

     ppn_t       ppn;        // current PTE ppn value
     uint32_t    attr;       // current PTE attributes
-    kmem_req_t  req;        // request to release memory
     xptr_t      page_xp;    // extended pointer on page descriptor
     cxy_t       page_cxy;   // page descriptor cluster

...

     // release physical page to relevant kmem when required
-    if( ppn_release )
-    {
-        if( page_cxy == local_cxy )
-        {
-            req.type = KMEM_PAGE;
-            req.ptr  = page_ptr;
-            kmem_free( &req );
-        }
-        else
-        {
-            rpc_pmem_release_pages_client( page_cxy , page_ptr );
-        }
-    }
+    if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr );

 #if( DEBUG_VMM_REMOVE_VSEG & 1 )

...

 //////////////////////////////////////////////////////////////////////////////////////
 // This static function computes the target cluster to allocate a physical page
-// for a given <vpn> in a given <vseg>, allocates the page (with an RPC if required)
-// and returns an extended pointer on the allocated page descriptor.
-// It can be called by a thread running in any cluster.
+// for a given <vpn> in a given <vseg>, allocates the page and returns an extended
+// pointer on the allocated page descriptor.
 // The vseg cannot have the FILE type.
 //////////////////////////////////////////////////////////////////////////////////////

...
 {

-#if DEBUG_VMM_ALLOCATE_PAGE
+#if DEBUG_VMM_PAGE_ALLOCATE
     uint32_t   cycle = (uint32_t)hal_get_cycles();
     thread_t * this  = CURRENT_THREAD;
-    if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
+    if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
         printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
                __FUNCTION__ , this->process->pid, this->trdid, vpn, cycle );
 #endif

-    page_t   * page_ptr;
+    xptr_t     page_xp;
     cxy_t      page_cxy;
-    kmem_req_t req;
     uint32_t   index;

...
     }

-    // allocate a physical page from target cluster
-    if( page_cxy == local_cxy )     // target cluster is the local cluster
-    {
-        req.type  = KMEM_PAGE;
-        req.size  = 0;
-        req.flags = AF_NONE;
-        page_ptr  = (page_t *)kmem_alloc( &req );
-    }
-    else                            // target cluster is not the local cluster
-    {
-        rpc_pmem_get_pages_client( page_cxy , 0 , &page_ptr );
-    }
-
-#if DEBUG_VMM_ALLOCATE_PAGE
+    // allocate a 4 Kbytes physical page from target cluster
+    page_xp = ppm_remote_alloc_pages( page_cxy , 0 );
+
+#if DEBUG_VMM_PAGE_ALLOCATE
     cycle = (uint32_t)hal_get_cycles();
-    if( DEBUG_VMM_ALLOCATE_PAGE < (uint32_t)hal_get_cycles() )
-        printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
-               __FUNCTION__ , this->process->pid, this->trdid, vpn,
-               ppm_page2ppn( XPTR( page_cxy , page_ptr ) ), cycle );
-#endif
-
-    if( page_ptr == NULL ) return XPTR_NULL;
-    else                   return XPTR( page_cxy , page_ptr );
+    if( DEBUG_VMM_PAGE_ALLOCATE < cycle )
+        printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cluster %x / cycle %d\n",
+               __FUNCTION__ , this->process->pid, this->trdid, vpn,
+               ppm_page2ppn(page_xp), page_cxy, cycle );
+#endif
+
+    return page_xp;

 } // end vmm_page_allocate()

...

     uint32_t   cycle = (uint32_t)hal_get_cycles();
     thread_t * this  = CURRENT_THREAD;
-    if( DEBUG_VMM_GET_ONE_PPN < cycle )
+//  if( DEBUG_VMM_GET_ONE_PPN < cycle )
+    if( vpn == 0x40B )
         printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n",
                __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );

...

 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-    if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+//  if( DEBUG_VMM_GET_ONE_PPN < cycle )
+    if( vpn == 0x40B )
         printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
                __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );

...

 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-    if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+//  if( DEBUG_VMM_GET_ONE_PPN < cycle )
+    if( vpn == 0x40B )
         printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n",
                __FUNCTION__, this->process->pid, this->trdid, vpn );

...

 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-    if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+//  if( DEBUG_VMM_GET_ONE_PPN < cycle )
+    if( vpn == 0x40B )
         printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n",
                __FUNCTION__, this->process->pid, this->trdid, vpn );

...

 #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
-    if( DEBUG_VMM_GET_ONE_PPN < (uint32_t)hal_get_cycles() )
+//  if( DEBUG_VMM_GET_ONE_PPN < cycle )
+    if( vpn == 0x40B )
         printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n"
                "    %d bytes from mapper / %d bytes from BSS\n",

...

 #if DEBUG_VMM_GET_ONE_PPN
     cycle = (uint32_t)hal_get_cycles();
-    if( DEBUG_VMM_GET_ONE_PPN < cycle )
+//  if( DEBUG_VMM_GET_ONE_PPN < cycle )
+    if( vpn == 0x40B )
         printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
                __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );

...

 #if DEBUG_VMM_HANDLE_PAGE_FAULT
-    if( DEBUG_VMM_HANDLE_PAGE_FAULT < start_cycle )
+    if( vpn == 0x40b )
         printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
                __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
 #endif

...

 #if DEBUG_VMM_HANDLE_PAGE_FAULT
-    if( DEBUG_VMM_HANDLE_PAGE_FAULT < start_cycle )
-        printk("\n[%s] thread[%x,%x] found vseg %s\n",
-               __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
+    uint32_t cycle = (uint32_t)hal_get_cycles();
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] found vseg %s / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type), cycle );
 #endif

     // build extended pointer on local GPT
     local_gpt_xp = XPTR( local_cxy , &process->vmm.gpt );

-    // lock target PTE in local GPT and get current PPN and attributes
+    // lock PTE in local GPT and get current PPN and attributes
     error = hal_gpt_lock_pte( local_gpt_xp,
                               vpn,
                               &attr,
                               &ppn );
...
     }

-    // handle page fault only if PTE still unmapped after lock
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    cycle = (uint32_t)hal_get_cycles();
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, cycle );
+#endif
+
+    // handle page fault only if local PTE still unmapped after lock
     if( (attr & GPT_MAPPED) == 0 )
     {
...
             (ref_cxy == local_cxy ) )
         {
-            // allocate and initialise a physical page depending on the vseg type
+
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] : access local gpt : local_cxy %x / ref_cxy %x / type %s\n",
+               __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) );
+#endif
+            // allocate and initialise a physical page
             error = vmm_get_one_ppn( vseg , vpn , &ppn );
...
             // define attr from vseg flags
-            attr = GPT_MAPPED | GPT_SMALL;
+            attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
             if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
             if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
...
             // set PTE to local GPT
+            // it unlocks this PTE
             hal_gpt_set_pte( local_gpt_xp,
                              vpn,
                              attr,
                              ppn );
...

 #if DEBUG_VMM_HANDLE_PAGE_FAULT
-            if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle )
-                printk("\n[%s] local page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
-                       __FUNCTION__, vpn, ppn, attr, end_cycle );
+            if( vpn == 0x40b )
+                printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n",
+                       __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
 #endif

...
         else
         {
+
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] access ref gpt : local_cxy %x / ref_cxy %x / type %s\n",
+               __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) );
+#endif
             // build extended pointer on reference GPT
             ref_gpt_xp = XPTR( ref_cxy , &ref_ptr->vmm.gpt );

-            // get current PPN and attributes from reference GPT
-            // without locking the PTE (in case of false page fault)
-            hal_gpt_get_pte( ref_gpt_xp,
-                             vpn,
-                             &ref_attr,
-                             &ref_ppn );
-
-            if( ref_attr & GPT_MAPPED )    // false page fault => update local GPT
+            // lock PTE in reference GPT and get current PPN and attributes
+            error = hal_gpt_lock_pte( ref_gpt_xp,
+                                      vpn,
+                                      &ref_attr,
+                                      &ref_ppn );
+            if( error )
+            {
+                printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
+                       __FUNCTION__ , vpn , process->pid );
+
+                // unlock PTE in local GPT
+                hal_gpt_unlock_pte( local_gpt_xp , vpn );
+
+                return EXCP_KERNEL_PANIC;
+            }
+
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n",
+               __FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn );
+#endif
+
+            if( ref_attr & GPT_MAPPED )    // false page fault
             {
                 // update local GPT from reference GPT values
+                // this unlocks the PTE in local GPT
                 hal_gpt_set_pte( local_gpt_xp,
                                  vpn,
                                  ref_attr,
                                  ref_ppn );
+
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n",
+               __FUNCTION__, this->process->pid, this->trdid );
+#endif
+
+                // unlock the PTE in reference GPT
+                hal_gpt_unlock_pte( ref_gpt_xp, vpn );
+
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n",
+               __FUNCTION__, this->process->pid, this->trdid );
+#endif

 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
     uint32_t end_cycle = (uint32_t)hal_get_cycles();
 #endif

 #if DEBUG_VMM_HANDLE_PAGE_FAULT
-    if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle )
-        printk("\n[%s] false page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
-               __FUNCTION__, vpn, ref_ppn, ref_attr, end_cycle );
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle );
 #endif

...
                 return EXCP_NON_FATAL;
             }
-            else                          // true page fault => update both GPTs
+            else                          // true page fault
             {
                 // allocate and initialise a physical page depending on the vseg type
...
                            __FUNCTION__ , process->pid , vpn );

-                    // unlock PTE in local GPT
+                    // unlock PTE in local GPT and in reference GPT
                     hal_gpt_unlock_pte( local_gpt_xp , vpn );
+                    hal_gpt_unlock_pte( ref_gpt_xp , vpn );

                     return EXCP_KERNEL_PANIC;
                 }

-                // lock PTE in reference GPT
-                error = hal_gpt_lock_pte( ref_gpt_xp,
-                                          vpn,
-                                          &ref_attr,
-                                          &ref_ppn );
-                if( error )
-                {
-                    printk("\n[PANIC] in %s : cannot lock PTE in ref GPT / vpn %x / process %x\n",
-                           __FUNCTION__ , vpn , process->pid );
-
-                    // unlock PTE in local GPT
-                    hal_gpt_unlock_pte( local_gpt_xp , vpn );
-
-                    return EXCP_KERNEL_PANIC;
-                }
-
                 // define attr from vseg flags
-                attr = GPT_MAPPED | GPT_SMALL;
+                attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
                 if( vseg->flags & VSEG_USER  ) attr |= GPT_USER;
                 if( vseg->flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
                 if( vseg->flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
                 if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE;

+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n",
+               __FUNCTION__, this->process->pid, this->trdid );
+#endif
                 // set PTE in reference GPT
+                // this unlocks the PTE
                 hal_gpt_set_pte( ref_gpt_xp,
                                  vpn,
                                  attr,
                                  ppn );

+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n",
+               __FUNCTION__, this->process->pid, this->trdid );
+#endif
+
                 // set PTE in local GPT
+                // this unlocks the PTE
                 hal_gpt_set_pte( local_gpt_xp,
                                  vpn,
                                  attr,
                                  ppn );
...

 #if DEBUG_VMM_HANDLE_PAGE_FAULT
-    if( DEBUG_VMM_HANDLE_PAGE_FAULT < end_cycle )
-        printk("\n[%s] global page fault handled / vpn %x / ppn %x / attr %x / cycle %d\n",
-               __FUNCTION__, vpn, ppn, attr, end_cycle );
+    if( vpn == 0x40b )
+        printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n",
+               __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle );
 #endif

...
     else   // page has been locally mapped by another concurrent thread
     {
-        // unlock PTE in local GPT
+        // unlock the PTE in local GPT
         hal_gpt_unlock_pte( local_gpt_xp , vpn );

+#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
+    uint32_t end_cycle = (uint32_t)hal_get_cycles();
+#endif
+
+#if DEBUG_VMM_HANDLE_PAGE_FAULT
+    if( vpn == 0x40b )
+        printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n",
+               __FUNCTION__, vpn, ppn, attr, end_cycle );
+#endif
+
+#if CONFIG_INSTRUMENTATION_PGFAULTS
+    this->info.false_pgfault_nr++;
+    this->info.false_pgfault_cost += (end_cycle - start_cycle);
+#endif
         return EXCP_NON_FATAL;
     }

...

     // lock target PTE in relevant GPT (local or reference)
+    // and get current PTE value
     error = hal_gpt_lock_pte( gpt_xp,
                               vpn,
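One functional change buried in the debug noise is that vmm_handle_page_fault() now always sets GPT_READABLE on a newly mapped PTE, in addition to the rights derived from the vseg flags. The standalone sketch below reproduces that attr construction; the VSEG_* and GPT_* values here are hypothetical placeholders, the real ones come from vseg.h and the GPT HAL.

    #include <stdint.h>
    #include <stdio.h>

    // Hypothetical flag encodings, for illustration only.
    #define VSEG_USER   0x01
    #define VSEG_WRITE  0x02
    #define VSEG_EXEC   0x04
    #define VSEG_CACHE  0x08

    #define GPT_MAPPED      0x0001
    #define GPT_SMALL       0x0002
    #define GPT_READABLE    0x0004
    #define GPT_USER        0x0008
    #define GPT_WRITABLE    0x0010
    #define GPT_EXECUTABLE  0x0020
    #define GPT_CACHABLE    0x0040

    // Mirrors the attr construction in vmm_handle_page_fault(): a newly mapped
    // PTE is always MAPPED, SMALL and READABLE; the other rights come from the vseg.
    static uint32_t attr_from_vseg_flags( uint32_t flags )
    {
        uint32_t attr = GPT_MAPPED | GPT_SMALL | GPT_READABLE;
        if( flags & VSEG_USER  ) attr |= GPT_USER;
        if( flags & VSEG_WRITE ) attr |= GPT_WRITABLE;
        if( flags & VSEG_EXEC  ) attr |= GPT_EXECUTABLE;
        if( flags & VSEG_CACHE ) attr |= GPT_CACHABLE;
        return attr;
    }

    int main( void )
    {
        printf("attr = %x\n", attr_from_vseg_flags( VSEG_USER | VSEG_WRITE | VSEG_CACHE ));
        return 0;
    }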
trunk/kernel/mm/vmm.h
(r629 → r632)

 /*********************************************************************************************
  * This function modifies one GPT entry identified by the <process> and <vpn> arguments
- * in all clusters containing a process copy.
+ * in all clusters containing a process copy. It is used to maintain coherence in GPT
+ * copies, using the list of copies stored in the owner process, and remote_write accesses.
  * It must be called by a thread running in the process owner cluster.
- * It is used to update to maintain coherence in GPT copies, using the list of copies
- * stored in the owner process, and uses remote_write accesses.
+ * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required.
  * It cannot fail, as only mapped PTE2 in GPT copies are updated.
  *********************************************************************************************