Changeset 635 for trunk/kernel/mm
- Timestamp: Jun 26, 2019, 11:42:37 AM
- Location: trunk/kernel/mm
- Files: 14 edited
trunk/kernel/mm/kcm.c
r619 r635 1 1 /* 2 * kcm.c - Per clusterKernel Cache Manager implementation.2 * kcm.c - Kernel Cache Manager implementation. 3 3 * 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019) 4 * Author Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 38 37 39 38 39 ///////////////////////////////////////////////////////////////////////////////////// 40 // Local access functions 41 ///////////////////////////////////////////////////////////////////////////////////// 42 40 43 ////////////////////////////////////////////////////////////////////////////////////// 41 // This static function returns pointer on an allocated block from an active page. 42 // It returns NULL if no block available in selected page. 43 // It changes the page status if required. 44 // This static function must be called by a local thread. 45 // It returns a pointer on a block allocated from a non-full kcm_page. 46 // It makes a panic if no block is available in selected page. 47 // It changes the page status as required. 44 48 ////////////////////////////////////////////////////////////////////////////////////// 45 // @ kcm : pointer on kcm allocator. 46 // @ kcm_page : pointer on active kcm page to use. 47 ///////////////////////////////////////////////////////////////////////////////////// 48 static void * kcm_get_block( kcm_t * kcm, 49 kcm_page_t * kcm_page ) 50 { 51 52 #if DEBUG_KCM 53 thread_t * this = CURRENT_THREAD; 54 uint32_t cycle = (uint32_t)hal_get_cycles(); 49 // @ kcm : pointer on KCM allocator. 50 // @ kcm_page : pointer on a non-full kcm_page. 51 // @ return pointer on allocated block. 52 ///////////////////////////////////////////////////////////////////////////////////// 53 static void * __attribute__((noinline)) kcm_get_block( kcm_t * kcm, 54 kcm_page_t * kcm_page ) 55 { 56 // initialise variables 57 uint32_t size = 1 << kcm->order; 58 uint32_t max = kcm->max_blocks; 59 uint32_t count = kcm_page->count; 60 uint64_t status = kcm_page->status; 61 62 assert( (count < max) , "kcm_page should not be full" ); 63 64 uint32_t index = 1; 65 uint64_t mask = (uint64_t)0x2; 66 uint32_t found = 0; 67 68 // allocate first free block in kcm_page, update status, 69 // and count , compute index of allocated block in kcm_page 70 while( index <= max ) 71 { 72 if( (status & mask) == 0 ) // block non allocated 73 { 74 kcm_page->status = status | mask; 75 kcm_page->count = count + 1; 76 found = 1; 77 78 break; 79 } 80 81 index++; 82 mask <<= 1; 83 } 84 85 // change the page list if almost full 86 if( count == max-1 ) 87 { 88 list_unlink( &kcm_page->list); 89 kcm->active_pages_nr--; 90 91 list_add_first( &kcm->full_root , &kcm_page->list ); 92 kcm->full_pages_nr ++; 93 } 94 95 // compute return pointer 96 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 97 98 #if (DEBUG_KCM & 1) 99 thread_t * this = CURRENT_THREAD; 100 uint32_t cycle = (uint32_t)hal_get_cycles(); 55 101 if( DEBUG_KCM < cycle ) 56 printk("\n[%s] thread[%x,%x] enters for %s / page %x / count %d / active %d\n", 57 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type), 58 (intptr_t)kcm_page , kcm_page->count , kcm_page->active ); 59 #endif 60 61 assert( kcm_page->active , "kcm_page should be active" ); 62 63 // get first block available 64 int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr ); 65 66 assert( (index != -1) , "kcm_page should not be full" ); 67 68 // allocate block 69 bitmap_clear( kcm_page->bitmap , index ); 70 71 // increase 
kcm_page count 72 kcm_page->count ++; 73 74 // change the kcm_page to busy if no more free block in page 75 if( kcm_page->count >= kcm->blocks_nr ) 76 { 77 kcm_page->active = 0; 78 list_unlink( &kcm_page->list); 79 kcm->active_pages_nr --; 80 81 list_add_first( &kcm->busy_root , &kcm_page->list); 82 kcm->busy_pages_nr ++; 83 kcm_page->busy = 1; 84 } 85 86 // compute return pointer 87 void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE 88 + (index * kcm->block_size) ); 89 90 #if DEBUG_KCM 91 cycle = (uint32_t)hal_get_cycles(); 92 if( DEBUG_KCM < cycle ) 93 printk("\n[%s] thread[%x,%x] exit for %s / ptr %x / page %x / count %d\n", 94 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type), 95 (intptr_t)ptr, (intptr_t)kcm_page, kcm_page->count ); 102 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n", 103 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle ); 96 104 #endif 97 105 98 106 return ptr; 99 } 100 101 ///////////////////////////////////////////////////////////////////////////////////// 102 // This static function releases a previously allocated block. 103 // It changes the kcm_page status if required. 104 ///////////////////////////////////////////////////////////////////////////////////// 105 // @ kcm : pointer on kcm allocator. 106 // @ kcm_page : pointer on kcm_page. 107 // @ ptr : pointer on block to be released. 108 ///////////////////////////////////////////////////////////////////////////////////// 109 static void kcm_put_block ( kcm_t * kcm, 110 kcm_page_t * kcm_page, 111 void * ptr ) 112 { 113 uint32_t index; 114 107 108 } // end kcm_get_block() 109 110 ///////////////////////////////////////////////////////////////////////////////////// 111 // This private static function must be called by a local thread. 112 // It releases a previously allocated block to the relevant kcm_page. 113 // It makes a panic if the released block is not allocated in this page. 114 // It changes the kcm_page status as required. 115 ///////////////////////////////////////////////////////////////////////////////////// 116 // @ kcm : pointer on kcm allocator. 117 // @ kcm_page : pointer on kcm_page. 118 // @ block_ptr : pointer on block to be released. 
119 ///////////////////////////////////////////////////////////////////////////////////// 120 static void __attribute__((noinline)) kcm_put_block ( kcm_t * kcm, 121 kcm_page_t * kcm_page, 122 void * block_ptr ) 123 { 124 // initialise variables 125 uint32_t max = kcm->max_blocks; 126 uint32_t size = 1 << kcm->order; 127 uint32_t count = kcm_page->count; 128 uint64_t status = kcm_page->status; 129 115 130 // compute block index from block pointer 116 index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size; 117 118 assert( !bitmap_state( kcm_page->bitmap , index ) , "page already freed" ); 119 120 assert( (kcm_page->count > 0) , "count already zero" ); 121 122 bitmap_set( kcm_page->bitmap , index ); 123 kcm_page->count --; 124 125 // change the page to active if it was busy 126 if( kcm_page->busy ) 127 { 128 kcm_page->busy = 0; 131 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size; 132 133 // compute mask in bit vector 134 uint64_t mask = ((uint64_t)0x1) << index; 135 136 assert( (status & mask) , "released block not allocated : status (%x,%x) / mask(%x,%x)", 137 GET_CXY(status), GET_PTR(status), GET_CXY(mask ), GET_PTR(mask ) ); 138 139 // update status & count in kcm_page 140 kcm_page->status = status & ~mask; 141 kcm_page->count = count - 1; 142 143 // change the page mode if page was full 144 if( count == max ) 145 { 129 146 list_unlink( &kcm_page->list ); 130 kcm-> busy_pages_nr --;147 kcm->full_pages_nr --; 131 148 132 149 list_add_last( &kcm->active_root, &kcm_page->list ); 133 150 kcm->active_pages_nr ++; 134 kcm_page->active = 1; 135 } 136 137 // change the kcm_page to free if last block in active page 138 if( (kcm_page->active) && (kcm_page->count == 0) ) 139 { 140 kcm_page->active = 0; 141 list_unlink( &kcm_page->list); 142 kcm->active_pages_nr --; 143 144 list_add_first( &kcm->free_root , &kcm_page->list); 145 kcm->free_pages_nr ++; 146 } 147 } 148 149 ///////////////////////////////////////////////////////////////////////////////////// 150 // This static function allocates one page from PPM. It initializes 151 // the kcm_page descriptor, and introduces the new kcm_page into freelist. 152 ///////////////////////////////////////////////////////////////////////////////////// 153 static error_t freelist_populate( kcm_t * kcm ) 154 { 155 page_t * page; 156 kcm_page_t * kcm_page; 157 kmem_req_t req; 158 159 // get one page from local PPM 160 req.type = KMEM_PAGE; 161 req.size = 0; 162 req.flags = AF_KERNEL; 163 page = kmem_alloc( &req ); 164 165 if( page == NULL ) 166 { 167 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 168 __FUNCTION__ , local_cxy ); 169 return ENOMEM; 170 } 171 172 // get page base address 173 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 174 kcm_page = (kcm_page_t *)GET_PTR( base_xp ); 175 176 // initialize KCM-page descriptor 177 bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr ); 178 179 kcm_page->busy = 0; 180 kcm_page->active = 0; 181 kcm_page->count = 0; 182 kcm_page->kcm = kcm; 183 kcm_page->page = page; 184 185 // introduce new page in free-list 186 list_add_first( &kcm->free_root , &kcm_page->list ); 187 kcm->free_pages_nr ++; 188 189 return 0; 190 } 191 192 ///////////////////////////////////////////////////////////////////////////////////// 193 // This private function gets one KCM page from the KCM freelist. 194 // It populates the freelist if required. 
195 ///////////////////////////////////////////////////////////////////////////////////// 196 static kcm_page_t * freelist_get( kcm_t * kcm ) 197 { 198 error_t error; 199 kcm_page_t * kcm_page; 200 201 // get a new page from PPM if freelist empty 202 if( kcm->free_pages_nr == 0 ) 203 { 204 error = freelist_populate( kcm ); 205 if( error ) return NULL; 206 } 207 208 // get first KCM page from freelist and unlink it 209 kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list ); 210 list_unlink( &kcm_page->list ); 211 kcm->free_pages_nr --; 151 } 152 153 #if (DEBUG_KCM & 1) 154 thread_t * this = CURRENT_THREAD; 155 uint32_t cycle = (uint32_t)hal_get_cycles(); 156 if( DEBUG_KCM < cycle ) 157 printk("\n[%s] thread[%x,%x] released block %x in page %x / size %d / count %d / cycle %d\n", 158 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle ); 159 #endif 160 161 } // kcm_put_block() 162 163 ///////////////////////////////////////////////////////////////////////////////////// 164 // This private static function must be called by a local thread. 165 // It returns one non-full kcm_page with te following policy : 166 // - if the "active_list" is non empty, it returns the first "active" page, 167 // without modifying the KCM state. 168 // - if the "active_list" is empty, it allocates a new page fromm PPM, inserts 169 // this page in the active_list, and returns it. 170 ///////////////////////////////////////////////////////////////////////////////////// 171 // @ kcm : local pointer on local KCM allocator. 172 // @ return pointer on a non-full kcm page if success / returns NULL if no memory. 173 ///////////////////////////////////////////////////////////////////////////////////// 174 static kcm_page_t * __attribute__((noinline)) kcm_get_page( kcm_t * kcm ) 175 { 176 kcm_page_t * kcm_page; 177 178 uint32_t active_pages_nr = kcm->active_pages_nr; 179 180 if( active_pages_nr > 0 ) // return first active page 181 { 182 kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 183 } 184 else // allocate a new page from PPM 185 { 186 // get one 4 Kbytes page from local PPM 187 page_t * page = ppm_alloc_pages( 0 ); 188 189 if( page == NULL ) 190 { 191 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 192 __FUNCTION__ , local_cxy ); 193 194 return NULL; 195 } 196 197 // get page base address 198 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 199 200 // get local pointer on kcm_page 201 kcm_page = GET_PTR( base_xp ); 202 203 // initialize kcm_page descriptor 204 kcm_page->status = 0; 205 kcm_page->count = 0; 206 kcm_page->kcm = kcm; 207 kcm_page->page = page; 208 209 // introduce new page in KCM active_list 210 list_add_first( &kcm->active_root , &kcm_page->list ); 211 kcm->active_pages_nr ++; 212 } 212 213 213 214 return kcm_page; 214 } 215 216 } // end kcm_get_page() 215 217 216 218 ////////////////////////////// 217 219 void kcm_init( kcm_t * kcm, 218 uint32_t type ) 219 { 220 221 // the kcm_page descriptor must fit in the KCM slot 222 assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) , "KCM slot too small\n" ); 223 224 // the allocated object must fit in one single page 225 assert( (kmem_type_size(type) <= (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE)), 226 "allocated object requires more than one single page\n" ); 220 uint32_t order) 221 { 222 223 assert( ((order > 5) && (order < 12)) , "order must be in [6,11]" ); 227 224 228 225 // initialize lock 229 busylock_init( &kcm->lock , LOCK_KCM_STATE ); 230 231 // initialize KCM 
type 232 kcm->type = type; 226 remote_busylock_init( XPTR( local_cxy , &kcm->lock ) , LOCK_KCM_STATE ); 233 227 234 228 // initialize KCM page lists 235 kcm->free_pages_nr = 0; 236 kcm->busy_pages_nr = 0; 229 kcm->full_pages_nr = 0; 237 230 kcm->active_pages_nr = 0; 238 list_root_init( &kcm->free_root ); 239 list_root_init( &kcm->busy_root ); 231 list_root_init( &kcm->full_root ); 240 232 list_root_init( &kcm->active_root ); 241 233 242 // initialize block size 243 uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE ); 244 kcm->block_size = block_size; 245 246 // initialize number of blocks per page 247 uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size; 248 kcm->blocks_nr = blocks_nr; 249 234 // initialize order and max_blocks 235 kcm->order = order; 236 kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1; 237 250 238 #if DEBUG_KCM 251 239 thread_t * this = CURRENT_THREAD; 252 240 uint32_t cycle = (uint32_t)hal_get_cycles(); 253 241 if( DEBUG_KCM < cycle ) 254 printk("\n[%s] thread[%x,%x] initialised KCM %s : block_size %d / blocks_nr %d\n", 255 __FUNCTION__, this->process->pid, this->trdid, 256 kmem_type_str( kcm->type ), block_size, blocks_nr ); 257 #endif 258 259 } 242 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n", 243 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks ); 244 #endif 245 246 } // end kcm_init() 260 247 261 248 /////////////////////////////// … … 263 250 { 264 251 kcm_page_t * kcm_page; 265 list_entry_t * iter; 252 253 // build extended pointer on KCM lock 254 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 266 255 267 256 // get KCM lock 268 busylock_acquire( &kcm->lock ); 269 270 // release all free pages 271 LIST_FOREACH( &kcm->free_root , iter ) 272 { 273 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list ); 274 list_unlink( iter ); 275 kcm->free_pages_nr --; 257 remote_busylock_acquire( lock_xp ); 258 259 // release all full pages 260 while( list_is_empty( &kcm->full_root ) == false ) 261 { 262 kcm_page = LIST_FIRST( &kcm->full_root , kcm_page_t , list ); 263 list_unlink( &kcm_page->list ); 276 264 ppm_free_pages( kcm_page->page ); 277 265 } 278 266 279 // release all active pages 280 LIST_FOREACH( &kcm->active_root , iter ) 281 { 282 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list ); 283 list_unlink( iter ); 284 kcm->free_pages_nr --; 267 // release all empty pages 268 while( list_is_empty( &kcm->active_root ) == false ) 269 { 270 kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 271 list_unlink( &kcm_page->list ); 285 272 ppm_free_pages( kcm_page->page ); 286 273 } 287 274 288 // release all busy pages289 LIST_FOREACH( &kcm->busy_root , iter )290 {291 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );292 list_unlink( iter );293 kcm->free_pages_nr --;294 ppm_free_pages( kcm_page->page );295 }296 297 275 // release KCM lock 298 busylock_release( &kcm->lock);276 remote_busylock_release( lock_xp ); 299 277 } 300 278 301 /////////////////////////////// 302 void * kcm_alloc( kcm_t * kcm ) 303 { 279 ////////////////////////////////// 280 void * kcm_alloc( uint32_t order ) 281 { 282 kcm_t * kcm_ptr; 304 283 kcm_page_t * kcm_page; 305 void * ptr = NULL; // pointer on block 284 void * block_ptr; 285 286 // min block size is 64 bytes 287 if( order < 6 ) order = 6; 288 289 assert( (order < 12) , "order = %d / must be less than 12" , order ); 290 291 // get local pointer on relevant KCM allocator 292 kcm_ptr = 
&LOCAL_CLUSTER->kcm[order - 6]; 293 294 // build extended pointer on local KCM lock 295 xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock ); 296 297 // get KCM lock 298 remote_busylock_acquire( lock_xp ); 299 300 // get a non-full kcm_page 301 kcm_page = kcm_get_page( kcm_ptr ); 302 303 if( kcm_page == NULL ) 304 { 305 remote_busylock_release( lock_xp ); 306 return NULL; 307 } 308 309 // get a block from selected active page 310 block_ptr = kcm_get_block( kcm_ptr , kcm_page ); 311 312 // release lock 313 remote_busylock_release( lock_xp ); 314 315 #if DEBUG_KCM 316 thread_t * this = CURRENT_THREAD; 317 uint32_t cycle = (uint32_t)hal_get_cycles(); 318 if( DEBUG_KCM < cycle ) 319 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n", 320 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr, 321 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 322 #endif 323 324 return block_ptr; 325 326 } // end kcm_alloc() 327 328 ///////////////////////////////// 329 void kcm_free( void * block_ptr ) 330 { 331 kcm_t * kcm_ptr; 332 kcm_page_t * kcm_page; 333 334 // check argument 335 assert( (block_ptr != NULL) , "block pointer cannot be NULL" ); 336 337 // get local pointer on KCM page 338 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 339 340 // get local pointer on KCM descriptor 341 kcm_ptr = kcm_page->kcm; 342 343 #if DEBUG_KCM 344 thread_t * this = CURRENT_THREAD; 345 uint32_t cycle = (uint32_t)hal_get_cycles(); 346 if( DEBUG_KCM < cycle ) 347 printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n", 348 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr, 349 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 350 #endif 351 352 // build extended pointer on local KCM lock 353 xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock ); 306 354 307 355 // get lock 308 busylock_acquire( &kcm->lock ); 309 310 // get an active page 311 if( list_is_empty( &kcm->active_root ) ) // no active page => get one 312 { 313 // get a page from free list 314 kcm_page = freelist_get( kcm ); 315 316 if( kcm_page == NULL ) 317 { 318 busylock_release( &kcm->lock ); 319 return NULL; 320 } 321 322 // insert page in active list 323 list_add_first( &kcm->active_root , &kcm_page->list ); 324 kcm->active_pages_nr ++; 325 kcm_page->active = 1; 326 } 327 else // get first page from active list 328 { 329 // get page pointer from active list 330 kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 356 remote_busylock_acquire( lock_xp ); 357 358 // release block 359 kcm_put_block( kcm_ptr , kcm_page , block_ptr ); 360 361 // release lock 362 remote_busylock_release( lock_xp ); 363 } 364 365 ///////////////////////////////////////////////////////////////////////////////////// 366 // Remote access functions 367 ///////////////////////////////////////////////////////////////////////////////////// 368 369 ///////////////////////////////////////////////////////////////////////////////////// 370 // This static function can be called by any thread running in any cluster. 371 // It returns a local pointer on a block allocated from an non-full kcm_page. 372 // It makes a panic if no block available in selected page. 373 // It changes the page status as required. 374 ///////////////////////////////////////////////////////////////////////////////////// 375 // @ kcm_cxy : remote KCM cluster identidfier. 
376 // @ kcm_ptr : local pointer on remote KCM allocator. 377 // @ kcm_page : pointer on active kcm page to use. 378 // @ return a local pointer on the allocated block. 379 ///////////////////////////////////////////////////////////////////////////////////// 380 static void * __attribute__((noinline)) kcm_remote_get_block( cxy_t kcm_cxy, 381 kcm_t * kcm_ptr, 382 kcm_page_t * kcm_page ) 383 { 384 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 385 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) ); 386 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 387 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 388 uint32_t size = 1 << order; 389 390 assert( (count < max) , "kcm_page should not be full" ); 391 392 uint32_t index = 1; 393 uint64_t mask = (uint64_t)0x2; 394 uint32_t found = 0; 395 396 // allocate first free block in kcm_page, update status, 397 // and count , compute index of allocated block in kcm_page 398 while( index <= max ) 399 { 400 if( (status & mask) == 0 ) // block non allocated 401 { 402 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask ); 403 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count ) , count + 1 ); 404 found = 1; 405 break; 406 } 407 408 index++; 409 mask <<= 1; 410 } 411 412 // change the page list if almost full 413 if( count == max-1 ) 414 { 415 list_remote_unlink( kcm_cxy , &kcm_page->list ); 416 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , -1 ); 417 418 list_remote_add_first( kcm_cxy , &kcm_ptr->full_root , &kcm_page->list ); 419 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , 1 ); 420 } 421 422 // compute return pointer 423 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 424 425 #if DEBUG_KCM_REMOTE 426 thread_t * this = CURRENT_THREAD; 427 uint32_t cycle = (uint32_t)hal_get_cycles(); 428 if( DEBUG_KCM_REMOTE < cycle ) 429 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n", 430 __FUNCTION__, this->process->pid, this->trdid, 431 ptr, kcm_page, kcm_cxy, size, count + 1 ); 432 #endif 433 434 return ptr; 435 436 } // end kcm_remote_get_block() 437 438 ///////////////////////////////////////////////////////////////////////////////////// 439 // This private static function can be called by any thread running in any cluster. 440 // It releases a previously allocated block to the relevant kcm_page. 441 // It changes the kcm_page status as required. 442 ///////////////////////////////////////////////////////////////////////////////////// 443 // @ kcm_cxy : remote KCM cluster identifier 444 // @ kcm_ptr : local pointer on remote KCM. 445 // @ kcm_page : local pointer on kcm_page. 446 // @ block_ptr : pointer on block to be released. 
447 ///////////////////////////////////////////////////////////////////////////////////// 448 static void __attribute__((noinline)) kcm_remote_put_block ( cxy_t kcm_cxy, 449 kcm_t * kcm_ptr, 450 kcm_page_t * kcm_page, 451 void * block_ptr ) 452 { 453 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) ); 454 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 455 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 456 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 457 uint32_t size = 1 << order; 458 459 // compute block index from block pointer 460 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size; 461 462 // compute mask in bit vector 463 uint64_t mask = 1 << index; 464 465 assert( (status & mask) , "released page not allocated" ); 466 467 // update status & count in kcm_page 468 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status & ~mask ); 469 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count - 1 ); 470 471 // change the page list if page was full 472 if( count == max ) 473 { 474 list_remote_unlink( kcm_cxy , &kcm_page->list ); 475 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , -1 ); 476 477 list_remote_add_last( kcm_cxy , &kcm_ptr->active_root, &kcm_page->list ); 478 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 ); 479 } 480 481 #if (DEBUG_KCM_REMOTE & 1) 482 thread_t * this = CURRENT_THREAD; 483 uint32_t cycle = (uint32_t)hal_get_cycles(); 484 if( DEBUG_KCM_REMOTE < cycle ) 485 printk("\n[%s] thread[%x,%x] released block %x in page %x / cluster %x / size %x / count %d\n", 486 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 ) 487 #endif 488 489 } // end kcm_remote_put_block() 490 491 ///////////////////////////////////////////////////////////////////////////////////// 492 // This private static function can be called by any thread running in any cluster. 493 // It gets one non-full KCM page from the remote KCM. 494 // It allocates a page from remote PPM to populate the freelist, and initialises 495 // the kcm_page descriptor when required. 
496 ///////////////////////////////////////////////////////////////////////////////////// 497 static kcm_page_t * __attribute__((noinline)) kcm_remote_get_page( cxy_t kcm_cxy, 498 kcm_t * kcm_ptr ) 499 { 500 kcm_page_t * kcm_page; // local pointer on remote KCM page 501 502 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); 503 504 if( active_pages_nr > 0 ) // return first active page 505 { 506 kcm_page = LIST_REMOTE_FIRST( kcm_cxy , &kcm_ptr->active_root , kcm_page_t , list ); 507 } 508 else // allocate a new page from PPM 509 { 510 // get one 4 Kbytes page from remote PPM 511 page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 ); 512 513 if( page == NULL ) 514 { 515 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 516 __FUNCTION__ , kcm_cxy ); 517 518 return NULL; 519 } 520 521 // get remote page base address 522 xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) ); 523 524 // get local pointer on kcm_page 525 kcm_page = GET_PTR( base_xp ); 526 527 // initialize kcm_page descriptor 528 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , 0 ); 529 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 ); 530 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm ) , kcm_ptr ); 531 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page ) , page ); 532 533 // introduce new page in remote KCM active_list 534 list_remote_add_first( kcm_cxy , &kcm_ptr->active_root , &kcm_page->list ); 535 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 ); 536 } 537 538 return kcm_page; 539 540 } // end kcm_remote_get_page() 541 542 ///////////////////////////////////////// 543 void * kcm_remote_alloc( cxy_t kcm_cxy, 544 uint32_t order ) 545 { 546 kcm_t * kcm_ptr; 547 kcm_page_t * kcm_page; 548 void * block_ptr; 549 550 if( order < 6 ) order = 6; 551 552 assert( (order < 12) , "order = %d / must be less than 12" , order ); 553 554 // get local pointer on relevant KCM allocator 555 kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6]; 556 557 // build extended pointer on remote KCM lock 558 xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock ); 559 560 // get lock 561 remote_busylock_acquire( lock_xp ); 562 563 // get a non-full kcm_page 564 kcm_page = kcm_remote_get_page( kcm_cxy , kcm_ptr ); 565 566 if( kcm_page == NULL ) 567 { 568 remote_busylock_release( lock_xp ); 569 return NULL; 331 570 } 332 571 333 572 // get a block from selected active page 334 // cannot fail, as an active page cannot be full... 
335 ptr = kcm_get_block( kcm , kcm_page ); 573 block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page ); 336 574 337 575 // release lock 338 busylock_release( &kcm->lock ); 339 340 return ptr; 576 remote_busylock_release( lock_xp ); 577 578 #if DEBUG_KCM_REMOTE 579 thread_t * this = CURRENT_THREAD; 580 uint32_t cycle = (uint32_t)hal_get_cycles(); 581 if( DEBUG_KCM_REMOTE < cycle ) 582 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n", 583 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr ); 584 #endif 585 586 return block_ptr; 587 588 } // end kcm_remote_alloc() 589 590 ///////////////////////////////////// 591 void kcm_remote_free( cxy_t kcm_cxy, 592 void * block_ptr ) 593 { 594 kcm_t * kcm_ptr; 595 kcm_page_t * kcm_page; 596 597 // check argument 598 assert( (block_ptr != NULL) , "block pointer cannot be NULL" ); 599 600 // get local pointer on remote KCM page 601 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 602 603 // get local pointer on remote KCM 604 kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) ); 605 606 // build extended pointer on remote KCM lock 607 xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock ); 608 609 // get lock 610 remote_busylock_acquire( lock_xp ); 611 612 // release block 613 kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr ); 614 615 // release lock 616 remote_busylock_release( lock_xp ); 617 618 #if DEBUG_KCM_REMOTE 619 thread_t * this = CURRENT_THREAD; 620 uint32_t cycle = (uint32_t)hal_get_cycles(); 621 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 622 if( DEBUG_KCM_REMOTE < cycle ) 623 printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n", 624 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr ); 625 #endif 626 627 } // end kcm_remote_free 628 629 ///////////////////////////////////////// 630 void kcm_remote_display( cxy_t kcm_cxy, 631 kcm_t * kcm_ptr ) 632 { 633 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) ); 634 uint32_t full_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) ); 635 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); 636 637 printk("*** KCM / cxy %x / order %d / full_pages %d / empty_pages %d / active_pages %d\n", 638 kcm_cxy, order, full_pages_nr, active_pages_nr ); 341 639 } 342 343 ///////////////////////////344 void kcm_free( void * ptr )345 {346 kcm_page_t * kcm_page;347 kcm_t * kcm;348 349 // check argument350 assert( (ptr != NULL) , "pointer cannot be NULL" );351 352 kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);353 kcm = kcm_page->kcm;354 355 // get lock356 busylock_acquire( &kcm->lock );357 358 // release block359 kcm_put_block( kcm , kcm_page , ptr );360 361 // release lock362 busylock_release( &kcm->lock );363 }364 365 ////////////////////////////366 void kcm_print (kcm_t * kcm)367 {368 printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",369 kmem_type_str( kcm->type ) ,370 kcm->free_pages_nr ,371 kcm->busy_pages_nr ,372 kcm->active_pages_nr );373 } -
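The heart of the new kcm.c above is the per-page bookkeeping: each 4 Kbytes kcm_page carries a 64-bit "status" vector (bit i set means slot i is allocated, slot 0 always holding the kcm_page descriptor itself) and a "count" of allocated blocks. The stand-alone sketch below reproduces just that mechanism in user space so it can be read in isolation; the toy_* names, the fixed order 9 and the main() driver are illustrative only and are not part of r635.

/* Minimal user-space sketch of the kcm_page "status"/"count" bookkeeping.
 * Slot 0 holds the page descriptor, bit i of "status" marks slot i allocated. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define TOY_PAGE_SIZE   4096
#define TOY_ORDER       9                                   /* 512-byte blocks  */
#define TOY_BLOCK_SIZE  (1 << TOY_ORDER)
#define TOY_MAX_BLOCKS  ((TOY_PAGE_SIZE >> TOY_ORDER) - 1)  /* slot 0 reserved   */

typedef struct toy_page_s
{
    uint64_t status;    /* bit i != 0 <=> slot i allocated (bit 0 unused)        */
    uint32_t count;     /* number of allocated blocks in this page               */
} toy_page_t;

/* allocate the first free slot, as kcm_get_block() does */
static void * toy_get_block( toy_page_t * page )
{
    assert( page->count < TOY_MAX_BLOCKS );

    uint32_t index = 1;
    uint64_t mask  = (uint64_t)0x2;

    /* scan the status vector for the first clear bit, starting at slot 1 */
    while( (page->status & mask) != 0 ) { index++; mask <<= 1; }

    page->status |= mask;
    page->count  += 1;

    /* block address = page base + (slot index * block size) */
    return (void *)((intptr_t)page + (index * TOY_BLOCK_SIZE));
}

/* release a block, recomputing its slot index from the pointer offset */
static void toy_put_block( toy_page_t * page , void * ptr )
{
    uint32_t index = ((intptr_t)ptr - (intptr_t)page) / TOY_BLOCK_SIZE;
    uint64_t mask  = ((uint64_t)0x1) << index;

    assert( page->status & mask );      /* released block must be allocated */

    page->status &= ~mask;
    page->count  -= 1;
}

int main( void )
{
    /* stand-in for one 4 Kbytes physical page */
    toy_page_t * page = aligned_alloc( TOY_PAGE_SIZE , TOY_PAGE_SIZE );
    if( page == NULL ) return 1;
    page->status = 0;
    page->count  = 0;

    void * a = toy_get_block( page );   /* slot 1 : offset 512  */
    void * b = toy_get_block( page );   /* slot 2 : offset 1024 */
    printf("a = page + %ld / b = page + %ld / count = %u\n",
           (long)((char*)a - (char*)page), (long)((char*)b - (char*)page), page->count );

    toy_put_block( page , b );
    toy_put_block( page , a );
    free( page );
    return 0;
}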
trunk/kernel/mm/kcm.h
r619 r635 1 1 /* 2 * kcm.h - Per-clusterKernel Cache Manager definition.2 * kcm.h - Kernel Cache Manager definition. 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018) 4 * Authors Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 33 32 #include <kmem.h> 34 33 34 35 #define KCM_PAGE_FULL 0 36 #define KCM_PAGE_EMPTY 1 37 #define KCM_PAGE_ACTIVE 2 38 35 39 /**************************************************************************************** 36 40 * This structure defines a generic Kernel Cache Manager, that is a block allocator, 37 * for fixed size objects. It exists a specific KCM allocator for each object type. 38 * The actual allocated block size is the smallest multiple of the KCM slot, that 39 * contain one single object. The KCM slot is 64 bytes, as it must be large enough 40 * to store the kcm_page descriptor, defined below. 41 * The various KCM allocators themselves are not statically allocated in the cluster 42 * manager, but are dynamically allocated when required, using the embedded KCM 43 * allocator defined in the cluster manager, to allocate the other ones... 41 * for fixed size objects. It exists in each cluster a specific KCM allocator for 42 * the following block sizes: 64, 128, 256, 512, 1024, 2048 bytes. 43 * These six KCM allocators are initialized by the cluster_init() function. 44 * 45 * Each KCM cache is implemented as a set o 4 Kbytes pages. A kcm_page is split in slots, 46 * where each slot can contain one block. in each kcm_page, the first slot (that cannot 47 * be smaller than 64 bytes) contains the kcm page descriptor, defined below 48 * 49 * To allow any thread running in any cluster to directly access the KCM of any cluster, 50 * ALMOS-MKH defines two sets of access functions, for local or remote access. 44 51 ***************************************************************************************/ 45 52 46 53 typedef struct kcm_s 47 54 { 48 busylock_t lock; /*! protect KCM ammocator */ 49 uint32_t block_size; /*! rounded block size (bytes) */ 50 uint32_t blocks_nr; /*! max number of blocks per page */ 55 remote_busylock_t lock; /*! protect KCM allocator */ 51 56 57 list_entry_t full_root; /*! root of full pages list */ 52 58 list_entry_t active_root; /*! root of active pages list */ 53 list_entry_t busy_root; /*! root of busy pages list */54 list_entry_t free_root; /*! root of free pages list */55 59 56 uint32_t free_pages_nr; /*! number of free pages */ 57 uint32_t busy_pages_nr; /*! number of busy pages */ 60 uint32_t full_pages_nr; /*! number of busy pages */ 58 61 uint32_t active_pages_nr; /*! number of active pages */ 59 62 60 uint32_t type; /*! KCM type */ 63 uint32_t order; /*! ln( block_size ) */ 64 uint32_t max_blocks; /*! max number of blocks per page */ 61 65 } 62 66 kcm_t; … … 65 69 /**************************************************************************************** 66 70 * This structure defines a KCM-page descriptor. 67 * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) blocks. 68 * This kcm page descriptor is stored in the first slot of the page. 71 * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) slots, 72 * and each slot contains one block. The kcm page descriptor is stored in first slot. 73 * The current allocation status is defined by the 64 bits "status" bit vector: each 74 * non zero bit defines an allocated block / "counts is the number of allocated blocks. 
75 * Each kcm_page is registered in one of the two following page_list: 76 * - full : when count == max 77 * - active : count < max 69 78 ***************************************************************************************/ 70 79 71 80 typedef struct kcm_page_s 72 81 { 73 uint32_t bitmap[2]; /*! at most 64 blocks in a single page */ 74 list_entry_t list; /*! [active / busy / free] list member */ 75 kcm_t * kcm; /*! pointer on kcm allocator */ 76 page_t * page; /*! pointer on the physical page descriptor */ 77 uint32_t count; /*! number of allocated blocks */ 78 uint32_t busy; /*! page busy if non zero */ 79 uint32_t active; /*! page active if non zero */ 82 uint64_t status; /*! bit vector: non-zero == allocated */ 83 uint32_t count; /*! number of allocated blocks in page */ 84 list_entry_t list; /*! [active / busy / free] list member */ 85 kcm_t * kcm; /*! pointer on kcm allocator */ 86 page_t * page; /*! pointer on the physical page descriptor */ 80 87 } 81 88 kcm_page_t; 82 89 83 90 /**************************************************************************************** 84 * This function initializes a generic Kernel Cache Manager. 91 * This function must be called by a local thread. 92 * It initializes a Kernel Cache Manager, depending on block size. 85 93 **************************************************************************************** 86 94 * @ kcm : pointer on KCM manager to initialize. 87 * @ type : KCM allocator type.95 * @ order : ln(block_size). 88 96 ***************************************************************************************/ 89 97 void kcm_init( kcm_t * kcm, 90 uint32_t type);98 uint32_t order ); 91 99 92 100 /**************************************************************************************** 93 * This function releases all memory allocated to a generic Kernel Cache Manager. 101 * This function must be called by a local thread. 102 * It releases all memory allocated to a Kernel Cache Manager. 94 103 **************************************************************************************** 95 104 * @ kcm : pointer on KCM manager to destroy. … … 98 107 99 108 /**************************************************************************************** 100 * This function allocates one single object from a Kernel Cache Manager101 * The object size must be smaller than one page size.109 * This function must be called by a local thread. 110 * It allocates one block from the local Kernel Cache Manager. 102 111 **************************************************************************************** 103 * @ kcm : pointer on the selected KCM allocator112 * @ order : ln( block-size ) == KCM allocator identifier. 104 113 * @ return pointer on allocated block if success / return NULL if failure 105 114 ***************************************************************************************/ 106 void * kcm_alloc( kcm_t * kcm);115 void * kcm_alloc( uint32_t order ); 107 116 108 117 /**************************************************************************************** 109 * This function releases a previouly allocated block containing one object. 118 * This function must be called by a local thread. 119 * It releases a previouly allocated block to the local Kernel Cache Manager. 110 120 **************************************************************************************** 111 * @ ptr : local pointer on the allocated buffer.121 * @ block_ptr : local pointer on the released block. 
112 122 ***************************************************************************************/ 113 void kcm_free( void *ptr );123 void kcm_free( void * block_ptr ); 114 124 115 125 /**************************************************************************************** 116 * This function prints KCM allocator state (for debug only). 126 * This function can be called by any thread running in any cluster. 127 * It allocates one fixed size block from a remote Kernel Cache Manager. 117 128 **************************************************************************************** 118 * @ kcm : local pointer on the selected KCM allocator. 129 * @ kcm_cxy : remote KCM cluster identifier. 130 * @ order : ln( block-size ) == KCM allocator identifier. 131 * @ return a local pointer on allocated block if success / return NULL if failure 119 132 ***************************************************************************************/ 120 void kcm_print( kcm_t * kcm ); 133 void * kcm_remote_alloc( cxy_t kcm_cxy, 134 uint32_t order ); 135 136 /**************************************************************************************** 137 * This function can be called by any thread running in any cluster. 138 * It releases a previouly allocated block to a remote Kernel Cache Manager. 139 **************************************************************************************** 140 * @ kcm_cxy : remote KCM cluster identifier. 141 * @ block_ptr : local pointer on the released buffer in remote cluster. 142 ***************************************************************************************/ 143 void kcm_remote_free( cxy_t kcm_cxy, 144 void * block_ptr ); 145 146 /**************************************************************************************** 147 * This debug function can be called by any thread running in any cluster. 148 * It diplays on TXT0 the current state of a local KCM allocator. 149 **************************************************************************************** 150 * @ kcm_cxy : remote KCM cluster identifier. 151 * @ kcm_ptr : local pointer on remote KCM. 152 ***************************************************************************************/ 153 void kcm_remote_display( cxy_t kcm_cxy, 154 kcm_t * kcm_ptr ); 121 155 122 156 #endif /* _KCM_H_ */ -
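For readers of the new interface above, a short illustrative caller (not part of the changeset): kcm_alloc()/kcm_free() operate on the local cluster's per-order caches, while kcm_remote_alloc()/kcm_remote_free() take an explicit cluster identifier. The wrapper function name and the target_cxy argument are placeholders; order 8 selects the 256-byte cache.

/* Illustrative caller of the new KCM interface (hypothetical wrapper). */
#include <kcm.h>

void kcm_usage_example( cxy_t target_cxy )
{
    /* local allocation: order 8 selects the 256-byte cache */
    void * local_block = kcm_alloc( 8 );
    if( local_block != NULL ) kcm_free( local_block );

    /* same size class, but allocated from another cluster's KCM */
    void * remote_block = kcm_remote_alloc( target_cxy , 8 );
    if( remote_block != NULL ) kcm_remote_free( target_cxy , remote_block );
}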
trunk/kernel/mm/khm.h
r619 r635 32 32 /******************************************************************************************* 33 33 * This structure defines a Kernel Heap Manager (KHM) in a given cluster. 34 * It is used to allocate memory objects, that too large, or not enough replicated34 * It is used to allocate memory objects, that are too large, or not enough replicated 35 35 * to use a dedicated KCM allocator. 36 36 ******************************************************************************************/ … … 54 54 { 55 55 uint32_t busy; /*! free block if zero */ 56 uint32_t size; /*! block size 56 uint32_t size; /*! block size */ 57 57 } 58 58 khm_block_t; -
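khm.c itself is not part of this changeset, so the actual KHM allocation policy is not visible here. The sketch below only illustrates how a block header reduced to the two fields shown above (busy, size) can support a linear first-fit scan, under the assumption that each block's size field covers both the header and the payload; all names other than khm_block_t are hypothetical.

/* Hedged sketch of a first-fit scan over busy/size block headers.
 * Assumption: "size" includes the header, so it is also the offset to the next block. */
#include <stdint.h>

typedef struct khm_block_s
{
    uint32_t busy;   /* free block if zero   */
    uint32_t size;   /* block size (bytes)   */
} khm_block_t;

static khm_block_t * first_fit( void * heap_base , uint32_t heap_size , uint32_t req )
{
    uint8_t * cur = heap_base;
    uint8_t * end = cur + heap_size;

    while( cur < end )
    {
        khm_block_t * block = (khm_block_t *)cur;

        /* free block large enough for the request plus its header */
        if( (block->busy == 0) && (block->size >= req + sizeof(khm_block_t)) )
            return block;

        cur += block->size;   /* jump to next block header */
    }
    return NULL;   /* no free block large enough */
}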
trunk/kernel/mm/kmem.c
r619 r635 2 2 * kmem.c - kernel memory allocator implementation. 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018) 4 * Authors Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 27 26 #include <hal_special.h> 28 27 #include <printk.h> 29 #include < busylock.h>28 #include <cluster.h> 30 29 #include <memcpy.h> 31 30 #include <khm.h> 32 31 #include <ppm.h> 32 #include <kcm.h> 33 33 #include <page.h> 34 #include <cluster.h>35 #include <thread.h>36 #include <process.h>37 #include <chdev.h>38 #include <mapper.h>39 #include <vfs.h>40 #include <fatfs.h>41 #include <ramfs.h>42 #include <user_dir.h>43 #include <remote_sem.h>44 #include <remote_barrier.h>45 #include <remote_mutex.h>46 #include <remote_condvar.h>47 #include <mapper.h>48 #include <grdxt.h>49 #include <vseg.h>50 34 #include <kmem.h> 51 52 /////////////////////////////////53 void kmem_print_kcm_table( void )54 {55 uint32_t index;56 kcm_t * kcm;57 cluster_t * cluster = LOCAL_CLUSTER;58 59 printk("\n *** KCM Pointers Table ***\n");60 61 for( index = 0 ; index < KMEM_TYPES_NR ; index++ )62 {63 kcm = cluster->kcm_tbl[index];64 if( kcm != NULL )65 {66 if( index == kcm->type )67 {68 printk(" - KCM[%s] (at address %x) is OK\n",69 kmem_type_str( index ) , (intptr_t)kcm );70 }71 else72 {73 printk(" - KCM[%s] (at address %x) is KO : has type %s\n",74 kmem_type_str( index ) , (intptr_t)kcm , kmem_type_str( kcm->type ) );75 }76 }77 }78 }79 80 /////////////////////////////////////////81 uint32_t kmem_type_size( uint32_t type )82 {83 if ( type == KMEM_PAGE ) return CONFIG_PPM_PAGE_SIZE;84 else if( type == KMEM_GENERIC ) return 0;85 else if( type == KMEM_KCM ) return sizeof( kcm_t );86 else if( type == KMEM_VSEG ) return sizeof( vseg_t );87 else if( type == KMEM_DEVICE ) return sizeof( chdev_t );88 else if( type == KMEM_MAPPER ) return sizeof( mapper_t );89 else if( type == KMEM_PROCESS ) return sizeof( process_t );90 else if( type == KMEM_CPU_CTX ) return CONFIG_CPU_CTX_SIZE;91 else if( type == KMEM_FPU_CTX ) return CONFIG_FPU_CTX_SIZE;92 else if( type == KMEM_GEN_BARRIER ) return sizeof( generic_barrier_t );93 94 else if( type == KMEM_SMP_BARRIER ) return sizeof( simple_barrier_t );95 else if( type == KMEM_DEVFS_CTX ) return sizeof( fatfs_ctx_t );96 else if( type == KMEM_FATFS_CTX ) return sizeof( fatfs_ctx_t );97 else if( type == KMEM_VFS_CTX ) return sizeof( vfs_ctx_t );98 else if( type == KMEM_VFS_INODE ) return sizeof( vfs_inode_t );99 else if( type == KMEM_VFS_DENTRY ) return sizeof( vfs_dentry_t );100 else if( type == KMEM_VFS_FILE ) return sizeof( vfs_file_t );101 else if( type == KMEM_SEM ) return sizeof( remote_sem_t );102 else if( type == KMEM_CONDVAR ) return sizeof( remote_condvar_t );103 else if( type == KMEM_MUTEX ) return sizeof( remote_mutex_t );104 105 else if( type == KMEM_DIR ) return sizeof( user_dir_t );106 else if( type == KMEM_512_BYTES ) return 512;107 108 else return 0;109 }110 111 /////////////////////////////////////112 char * kmem_type_str( uint32_t type )113 {114 if ( type == KMEM_PAGE ) return "KMEM_PAGE";115 else if( type == KMEM_GENERIC ) return "KMEM_GENERIC";116 else if( type == KMEM_KCM ) return "KMEM_KCM";117 else if( type == KMEM_VSEG ) return "KMEM_VSEG";118 else if( type == KMEM_DEVICE ) return "KMEM_DEVICE";119 else if( type == KMEM_MAPPER ) return "KMEM_MAPPER";120 else if( type == KMEM_PROCESS ) return "KMEM_PROCESS";121 else if( type == KMEM_CPU_CTX ) return "KMEM_CPU_CTX";122 else if( type == KMEM_FPU_CTX ) return 
"KMEM_FPU_CTX";123 else if( type == KMEM_GEN_BARRIER ) return "KMEM_GEN_BARRIER";124 125 else if( type == KMEM_SMP_BARRIER ) return "KMEM_SMP_BARRIER";126 else if( type == KMEM_DEVFS_CTX ) return "KMEM_DEVFS_CTX";127 else if( type == KMEM_FATFS_CTX ) return "KMEM_FATFS_CTX";128 else if( type == KMEM_VFS_CTX ) return "KMEM_VFS_CTX";129 else if( type == KMEM_VFS_INODE ) return "KMEM_VFS_INODE";130 else if( type == KMEM_VFS_DENTRY ) return "KMEM_VFS_DENTRY";131 else if( type == KMEM_VFS_FILE ) return "KMEM_VFS_FILE";132 else if( type == KMEM_SEM ) return "KMEM_SEM";133 else if( type == KMEM_CONDVAR ) return "KMEM_CONDVAR";134 else if( type == KMEM_MUTEX ) return "KMEM_MUTEX";135 136 else if( type == KMEM_DIR ) return "KMEM_DIR";137 else if( type == KMEM_512_BYTES ) return "KMEM_512_BYTES";138 139 else return "undefined";140 }141 142 /////////////////////////////////////////////////////////////////////////////////////////////143 // This static function dynamically allocates and initializes a specific KCM allocator.144 // It uses the KCM allocator embedded in cluster manager, initialized by cluster_init().145 /////////////////////////////////////////////////////////////////////////////////////////////146 static error_t kmem_create_kcm( uint32_t type )147 {148 kcm_t * kcm;149 150 assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , "illegal KCM type" );151 152 #if DEBUG_KMEM153 thread_t * this = CURRENT_THREAD;154 uint32_t cycle = (uint32_t)hal_get_cycles();155 if( DEBUG_KMEM < cycle )156 printk("\n[%s] thread[%x,%x] enter / KCM type %s missing in cluster %x / cycle %d\n",157 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str( type ), local_cxy, cycle );158 #endif159 160 cluster_t * cluster = LOCAL_CLUSTER;161 162 // allocate memory for the requested KCM allocator163 // from the KCM allocator embedded in cluster descriptor164 kcm = kcm_alloc( &cluster->kcm );165 166 if( kcm == NULL )167 {168 printk("\n[ERROR] in %s : failed to create KCM type %d in cluster %x\n",169 __FUNCTION__ , type , local_cxy );170 return ENOMEM;171 }172 173 // initialize the new KCM allocator174 kcm_init( kcm , type );175 176 // register it in the KCM pointers Table177 cluster->kcm_tbl[type] = kcm;178 179 hal_fence();180 181 #if DEBUG_KMEM182 cycle = (uint32_t)hal_get_cycles();183 if( DEBUG_KMEM < cycle )184 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",185 __FUNCTION__, this->process->pid, this->trdid, cycle );186 #endif187 188 return 0;189 }190 35 191 36 ///////////////////////////////////// 192 37 void * kmem_alloc( kmem_req_t * req ) 193 38 { 194 cluster_t * cluster = LOCAL_CLUSTER; 195 196 uint32_t type; 197 uint32_t flags; 198 uint32_t size; // ln( pages ) if PPM / bytes if KHM / unused if KCM 199 void * ptr; // memory buffer if KHM or KCM / page descriptor if PPM 39 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 40 uint32_t flags; // AF_NONE / AF_ZERO / AF_KERNEL 41 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 200 42 201 43 type = req->type; 202 size = req->size;44 order = req->order; 203 45 flags = req->flags; 204 46 205 assert( (type < KMEM_TYPES_NR) , "illegal KMEM request type" ); 47 ////////////////////////////////// PPM 48 if( type == KMEM_PPM ) 49 { 50 // allocate the number of requested pages 51 page_t * page_ptr = (void *)ppm_alloc_pages( order ); 52 53 if( page_ptr == NULL ) 54 { 55 printk("\n[ERROR] in %s : PPM failed / order %d / cluster %x\n", 56 __FUNCTION__ , order , local_cxy ); 57 return NULL; 58 } 59 60 xptr_t page_xp = XPTR( local_cxy , page_ptr ); 61 62 // reset page 
if requested 63 if( flags & AF_ZERO ) page_zero( page_ptr ); 64 65 // get pointer on buffer from the page descriptor 66 void * ptr = GET_PTR( ppm_page2base( page_xp ) ); 206 67 207 68 #if DEBUG_KMEM 208 thread_t * this = CURRENT_THREAD;209 uint32_t cycle = (uint32_t)hal_get_cycles();69 thread_t * this = CURRENT_THREAD; 70 uint32_t cycle = (uint32_t)hal_get_cycles(); 210 71 if( DEBUG_KMEM < cycle ) 211 printk("\n[%s] thread [%x,%x] enter / %s / size %d / cluster %x / cycle %d\n", 72 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 73 __FUNCTION__, this->process->pid, this->trdid, 74 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle ); 75 #endif 76 return ptr; 77 } 78 ///////////////////////////////////// KCM 79 else if( type == KMEM_KCM ) 80 { 81 // allocate memory from KCM 82 void * ptr = kcm_alloc( order ); 83 84 if( ptr == NULL ) 85 { 86 printk("\n[ERROR] in %s : KCM failed / order %d / cluster %x\n", 87 __FUNCTION__ , order , local_cxy ); 88 return NULL; 89 } 90 91 // reset memory if requested 92 if( flags & AF_ZERO ) memset( ptr , 0 , 1<<order ); 93 94 #if DEBUG_KMEM 95 thread_t * this = CURRENT_THREAD; 96 uint32_t cycle = (uint32_t)hal_get_cycles(); 97 if( DEBUG_KMEM < cycle ) 98 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n", 99 __FUNCTION__, this->process->pid, this->trdid, 100 1<<order, ptr, local_cxy, cycle ); 101 #endif 102 return ptr; 103 } 104 //////////////////////////////////// KHM 105 else if( type == KMEM_KHM ) 106 { 107 // allocate memory from KHM 108 void * ptr = khm_alloc( &LOCAL_CLUSTER->khm , order ); 109 110 if( ptr == NULL ) 111 { 112 printk("\n[ERROR] in %s : KHM failed / order %d / cluster %x\n", 113 __FUNCTION__ , order , local_cxy ); 114 return NULL; 115 } 116 117 // reset memory if requested 118 if( flags & AF_ZERO ) memset( ptr , 0 , order ); 119 120 #if DEBUG_KMEM 121 thread_t * this = CURRENT_THREAD; 122 uint32_t cycle = (uint32_t)hal_get_cycles(); 123 if( DEBUG_KMEM < cycle ) 124 printk("\n[%s] thread[%x,%x] from KHM / %d bytes / base %x / cxy %x / cycle %d\n", 212 125 __FUNCTION__, this->process->pid, this->trdid, 213 kmem_type_str( type ), size, local_cxy, cycle ); 214 #endif 215 216 // analyse request type 217 if( type == KMEM_PAGE ) // PPM allocator 218 { 219 // allocate the number of requested pages 220 ptr = (void *)ppm_alloc_pages( size ); 221 if( ptr == NULL ) 222 { 223 printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 224 __FUNCTION__ , type , size , local_cxy ); 225 return NULL; 226 } 227 228 // reset page if requested 229 if( flags & AF_ZERO ) page_zero( (page_t *)ptr ); 230 231 #if DEBUG_KMEM 232 cycle = (uint32_t)hal_get_cycles(); 233 if( DEBUG_KMEM < cycle ) 234 printk("\n[%s] thread[%x,%x] exit / %d page(s) allocated / ppn %x / cycle %d\n", 235 __FUNCTION__, this->process->pid, this->trdid, 236 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle ); 237 #endif 238 239 } 240 else if( type == KMEM_GENERIC ) // KHM allocator 241 { 242 // allocate memory from KHM 243 ptr = khm_alloc( &cluster->khm , size ); 244 if( ptr == NULL ) 245 { 246 printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 247 __FUNCTION__ , type , size , local_cxy ); 248 return NULL; 249 } 250 251 // reset memory if requested 252 if( flags & AF_ZERO ) memset( ptr , 0 , size ); 253 254 #if DEBUG_KMEM 255 cycle = (uint32_t)hal_get_cycles(); 256 if( DEBUG_KMEM < cycle ) 257 printk("\n[%s] thread[%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n", 
258 __FUNCTION__, this->process->pid, this->trdid, 259 kmem_type_str( type ), (intptr_t)ptr, size, cycle ); 260 #endif 261 262 } 263 else // KCM allocator 264 { 265 // initialize the KCM allocator if not already done 266 if( cluster->kcm_tbl[type] == NULL ) 267 { 268 // get lock protecting local kcm_tbl[] array 269 busylock_acquire( &cluster->kcm_lock ); 270 271 // create missing KCM 272 error_t error = kmem_create_kcm( type ); 273 274 // release lock protecting local kcm_tbl[] array 275 busylock_release( &cluster->kcm_lock ); 276 277 if ( error ) 278 { 279 printk("\n[ERROR] in %s : cannot create KCM type %d in cluster %x\n", 280 __FUNCTION__, type, local_cxy ); 281 return NULL; 282 } 283 } 284 285 // allocate memory from KCM 286 ptr = kcm_alloc( cluster->kcm_tbl[type] ); 287 if( ptr == NULL ) 288 { 289 printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 290 __FUNCTION__ , type , size , local_cxy ); 291 return NULL; 292 } 293 294 // reset memory if requested 295 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) ); 296 297 #if DEBUG_KMEM 298 cycle = (uint32_t)hal_get_cycles(); 299 if( DEBUG_KMEM < cycle ) 300 printk("\n[%s] thread [%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n", 301 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(type), (intptr_t)ptr, 302 kmem_type_size(type), cycle ); 303 #endif 304 305 } 306 307 return ptr; 308 } 126 order, ptr, local_cxy, cycle ); 127 #endif 128 return ptr; 129 } 130 else 131 { 132 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 133 return NULL; 134 } 135 } // end kmem_alloc() 309 136 310 137 ////////////////////////////////// 311 138 void kmem_free( kmem_req_t * req ) 312 139 { 313 if( req->type >= KMEM_TYPES_NR ) 314 { 315 assert( false , "illegal request type\n" ); 316 } 317 318 switch(req->type) 319 { 320 case KMEM_PAGE: 321 ppm_free_pages( (page_t*)req->ptr ); 322 return; 323 324 case KMEM_GENERIC: 325 khm_free( req->ptr ); 326 return; 327 328 default: 329 kcm_free( req->ptr ); 330 return; 331 } 332 } 333 140 uint32_t type = req->type; 141 142 if( type == KMEM_PPM ) 143 { 144 page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , req->ptr ) ) ); 145 146 ppm_free_pages( page ); 147 } 148 else if( type == KMEM_KCM ) 149 { 150 kcm_free( req->ptr ); 151 } 152 else if( type == KMEM_KHM ) 153 { 154 khm_free( req->ptr ); 155 } 156 else 157 { 158 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 159 } 160 } // end kmem_free() 161 162 /////////////////////////////////////////// 163 void * kmem_remote_alloc( cxy_t cxy, 164 kmem_req_t * req ) 165 { 166 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 167 uint32_t flags; // AF_ZERO / AF_KERNEL / AF_NONE 168 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 169 170 type = req->type; 171 order = req->order; 172 flags = req->flags; 173 174 ///////////////////////////////// PPM 175 if( type == KMEM_PPM ) 176 { 177 // allocate the number of requested pages 178 page_t * page_ptr = ppm_remote_alloc_pages( cxy , order ); 179 180 if( page_ptr == NULL ) 181 { 182 printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n", 183 __FUNCTION__ , order , cxy ); 184 return NULL; 185 } 186 187 xptr_t page_xp = XPTR( cxy , page_ptr ); 188 189 // get pointer on buffer from the page descriptor 190 xptr_t base_xp = ppm_page2base( page_xp ); 191 192 // reset page if requested 193 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); 194 195 void * ptr = GET_PTR( base_xp ); 196 197 
#if DEBUG_KMEM_REMOTE 198 thread_t * this = CURRENT_THREAD; 199 uint32_t cycle = (uint32_t)hal_get_cycles(); 200 if( DEBUG_KMEM_REMOTE < cycle ) 201 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 202 __FUNCTION__, this->process->pid, this->trdid, 203 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), cxy, cycle ); 204 #endif 205 return ptr; 206 } 207 /////////////////////////////////// KCM 208 else if( type == KMEM_KCM ) 209 { 210 // allocate memory from KCM 211 void * ptr = kcm_remote_alloc( cxy , order ); 212 213 if( ptr == NULL ) 214 { 215 printk("\n[ERROR] in %s : failed for KCM / order %d in cluster %x\n", 216 __FUNCTION__ , order , cxy ); 217 return NULL; 218 } 219 220 // reset memory if requested 221 if( flags & AF_ZERO ) hal_remote_memset( XPTR( cxy , ptr ) , 0 , 1<<order ); 222 223 #if DEBUG_KMEM_REMOTE 224 thread_t * this = CURRENT_THREAD; 225 uint32_t cycle = (uint32_t)hal_get_cycles(); 226 if( DEBUG_KMEM_REMOTE < cycle ) 227 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n", 228 __FUNCTION__, this->process->pid, this->trdid, 229 1<<order, ptr, cxy, cycle ); 230 #endif 231 return ptr; 232 } 233 /////////////////////////////////// KHM 234 else if( type == KMEM_KHM ) 235 { 236 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ ); 237 return NULL; 238 } 239 else 240 { 241 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 242 return NULL; 243 } 244 } // kmem_remote_malloc() 245 246 //////////////////////////////////////// 247 void kmem_remote_free( cxy_t cxy, 248 kmem_req_t * req ) 249 { 250 uint32_t type = req->type; 251 252 if( type == KMEM_PPM ) 253 { 254 page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , req->ptr ) ) ); 255 256 ppm_remote_free_pages( cxy , page ); 257 } 258 else if( type == KMEM_KCM ) 259 { 260 kcm_remote_free( cxy , req->ptr ); 261 } 262 else if( type == KMEM_KHM ) 263 { 264 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ ); 265 } 266 else 267 { 268 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 269 } 270 } // end kmem_remote_free() 271 272 273 -
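To summarize the reworked dispatch above, here is a hedged usage example (not part of r635): the request types KMEM_PPM / KMEM_KCM, the AF_* flags and the meaning of the order field are taken from the code, while the wrapper function and the chosen orders are illustrative.

/* Illustrative callers of the reworked kmem interface (hypothetical wrapper). */
#include <kmem.h>

void kmem_usage_example( cxy_t remote_cxy )
{
    kmem_req_t req;

    /* two contiguous physical pages, zeroed, from the local PPM */
    req.type  = KMEM_PPM;
    req.order = 1;                      /* ln2(pages)                      */
    req.flags = AF_KERNEL | AF_ZERO;
    void * pages = kmem_alloc( &req );

    if( pages != NULL )
    {
        req.ptr = pages;                /* kmem_free() uses type and ptr   */
        kmem_free( &req );
    }

    /* one 128-byte block from the KCM of a remote cluster */
    req.type  = KMEM_KCM;
    req.order = 7;                      /* ln2(bytes)                      */
    req.flags = AF_ZERO;
    void * block = kmem_remote_alloc( remote_cxy , &req );

    if( block != NULL )
    {
        req.ptr = block;
        kmem_remote_free( remote_cxy , &req );
    }
}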
trunk/kernel/mm/kmem.h
r619 r635 2 2 * kmem.h - kernel unified memory allocator interface 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018) 4 * Authors Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 30 29 31 30 /************************************************************************************* 32 * This enum defines the Kernel Memory Types for dynamically allocated objects. 33 * WARNING : this enum must be kepts consistent with use in kmem.c file. 31 * This enum defines the three Kernel Memory Allocator types: 34 32 ************************************************************************************/ 35 33 36 34 enum 37 35 { 38 KMEM_PAGE = 0, /*! reserved for PPM allocator */ 39 KMEM_GENERIC = 1, /*! reserved for KHM allocator */ 40 KMEM_KCM = 2, /*! kcm_t */ 41 KMEM_VSEG = 3, /*! vseg_t */ 42 KMEM_DEVICE = 4, /*! device_t */ 43 KMEM_MAPPER = 5, /*! mapper_t */ 44 KMEM_PROCESS = 6, /*! process_t */ 45 KMEM_CPU_CTX = 7, /*! hal_cpu_context_t */ 46 KMEM_FPU_CTX = 8, /*! hal_fpu_context_t */ 47 KMEM_GEN_BARRIER = 9, /*! generi_cbarrier_t */ 48 49 KMEM_SMP_BARRIER = 10, /*! simple_barrier_t */ 50 KMEM_DEVFS_CTX = 11, /*! fatfs_inode_t */ 51 KMEM_FATFS_CTX = 12, /*! fatfs_ctx_t */ 52 KMEM_VFS_CTX = 13, /*! vfs_context_t */ 53 KMEM_VFS_INODE = 14, /*! vfs_inode_t */ 54 KMEM_VFS_DENTRY = 15, /*! vfs_dentry_t */ 55 KMEM_VFS_FILE = 16, /*! vfs_file_t */ 56 KMEM_SEM = 17, /*! remote_sem_t */ 57 KMEM_CONDVAR = 18, /*! remote_condvar_t */ 58 KMEM_MUTEX = 19, /*! remote_mutex_t */ 59 60 KMEM_DIR = 20, /*! remote_dir_t */ 61 KMEM_512_BYTES = 21, /*! 512 bytes aligned */ 62 63 KMEM_TYPES_NR = 22, 36 KMEM_PPM = 0, /*! PPM allocator */ 37 KMEM_KCM = 1, /*! KCM allocator */ 38 KMEM_KHM = 2, /*! KHM allocator */ 64 39 }; 65 40 … … 79 54 typedef struct kmem_req_s 80 55 { 81 uint32_t type; /*! request type*/82 uint32_t size; /*! ln2(nb_pages) if PPM / bytes if KHM / unused by KCM*/56 uint32_t type; /*! KMEM_PPM / KMEM_KCM / KMEM_KHM */ 57 uint32_t order; /*! PPM: ln2(pages) / KCM: ln2(bytes) / KHM: bytes */ 83 58 uint32_t flags; /*! request attributes */ 84 59 void * ptr; /*! local pointer on allocated buffer (only used by free) */ … … 87 62 88 63 /************************************************************************************* 89 * Th is generic function allocates physical memory in the localcluster 90 * as specified by the request descriptor. 91 * It uses three specialised physical memory allocators, depending on request type: 92 * - PPM (Physical Pages Manager) allocates N contiguous physical pages, 93 * N must be a power of 2. 94 * - K HM (Kernel Heap Manager) allocates a physical memory buffer, 95 * that can have anysize. 96 * - K CM (Kernel Cache Manager) allocates various fixed size objects, 97 * handling a dedicated cache for each object type. 64 * These two functions allocate physical memory in a local or remote cluster 65 * as specified by the kmem_req_t request descriptor, and return a local pointer 66 * on the allocated buffer. They use three specialised physical memory allocators: 67 * - PPM (Physical Pages Manager) allocates N contiguous small physical pages. 68 * N is a power of 2, and req.order = ln(N). Implements the buddy algorithm. 69 * - KCM (Kernel Cache Manager) allocates aligned blocks of M bytes from a cache. 70 * M is a power of 2, and req.order = ln( M ). One cache per block size. 71 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes, 72 * M can have any value, and req.order = M. 
98 73 ************************************************************************************* 99 * @ req : local pointer to allocation request. 100 * @ return a local pointer on page descriptor if KMEM_PAGE. 101 * return a local pointer to allocated buffer if KCM or KHM. 102 * return NULL if no physical memory available. 74 * @ cxy : target cluster identifier for a remote access. 75 * @ req : local pointer on allocation request. 76 * @ return local pointer on allocated buffer if success / return NULL if no memory. 103 77 ************************************************************************************/ 104 78 void * kmem_alloc( kmem_req_t * req ); 105 79 80 void * kmem_remote_alloc( cxy_t cxy, 81 kmem_req_t * req ); 82 106 83 /************************************************************************************* 107 * Th is function releasespreviously allocated physical memory, as specified108 * by the "type" and "ptr" fiels of the kmem-req_t request.84 * These two functions release previously allocated physical memory, as specified 85 * by the <type> and <ptr> fields of the kmem_req_t request descriptor. 109 86 ************************************************************************************* 87 * @ cxy : target cluster identifier for a remote access. 110 88 * @ req : local pointer to request descriptor. 111 89 ************************************************************************************/ 112 90 void kmem_free ( kmem_req_t * req ); 113 91 114 /************************************************************************************* 115 * This function returns a printable string for a kmem object type. 116 ************************************************************************************* 117 * @ type : kmem object type. 118 ************************************************************************************/ 119 char * kmem_type_str( uint32_t type ); 120 121 /************************************************************************************* 122 * This function returns the size (bytes) for a kmem object type. 123 ************************************************************************************* 124 * @ type : kmem object type. 125 ************************************************************************************/ 126 uint32_t kmem_type_size( uint32_t type ); 127 128 /************************************************************************************* 129 * This function displays the content of the KCM pointers Table 130 ************************************************************************************/ 131 void kmem_print_kcm_table( void ); 92 void kmem_remote_free( cxy_t cxy, 93 kmem_req_t * req ); 132 94 133 95 -
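Editor's note: a usage sketch of the request descriptor for the local interface; the request fields follow the definitions above, while the surrounding error handling is illustrative:

    kmem_req_t req;

    // allocate four zeroed contiguous small pages from the local PPM
    req.type  = KMEM_PPM;
    req.order = 2;                      // ln2( number of pages )
    req.flags = AF_KERNEL | AF_ZERO;
    void * base = kmem_alloc( &req );

    if( base != NULL )
    {
        // the page descriptor can be recovered from the buffer base when needed
        page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , base ) ) );
        printk("allocated ppn %x\n", ppm_page2ppn( XPTR( local_cxy , page ) ) );

        // release : kmem_free() only uses the <type> and <ptr> fields
        req.type = KMEM_PPM;
        req.ptr  = base;
        kmem_free( &req );
    }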
trunk/kernel/mm/mapper.c
r628 r635 52 52 error_t error; 53 53 54 // allocate memory for mapper 55 req.type = KMEM_ MAPPER;56 req. size = sizeof(mapper_t);54 // allocate memory for mapper descriptor 55 req.type = KMEM_KCM; 56 req.order = bits_log2( sizeof(mapper_t) ); 57 57 req.flags = AF_KERNEL | AF_ZERO; 58 mapper = (mapper_t *)kmem_alloc( &req );58 mapper = kmem_alloc( &req ); 59 59 60 60 if( mapper == NULL ) … … 73 73 CONFIG_MAPPER_GRDXT_W2, 74 74 CONFIG_MAPPER_GRDXT_W3 ); 75 76 75 if( error ) 77 76 { 78 77 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ ); 79 req.type = KMEM_ MAPPER;78 req.type = KMEM_KCM; 80 79 req.ptr = mapper; 81 80 kmem_free( &req ); … … 117 116 { 118 117 // remove page from mapper and release to PPM 119 mapper_re lease_page( mapper, page );118 mapper_remote_release_page( XPTR( local_cxy , mapper ) , page ); 120 119 121 120 // update start_key value for next page … … 129 128 130 129 // release memory for mapper descriptor 131 req.type = KMEM_ MAPPER;130 req.type = KMEM_KCM; 132 131 req.ptr = mapper; 133 132 kmem_free( &req ); 134 133 135 134 } // end mapper_destroy() 135 136 //////////////////////////////////////////////////////// 137 error_t mapper_remote_handle_miss( xptr_t mapper_xp, 138 uint32_t page_id, 139 xptr_t * page_xp_ptr ) 140 { 141 error_t error; 142 143 thread_t * this = CURRENT_THREAD; 144 145 // get target mapper cluster and local pointer 146 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 147 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 148 149 #if DEBUG_MAPPER_HANDLE_MISS 150 uint32_t cycle = (uint32_t)hal_get_cycles(); 151 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 152 vfs_inode_t * inode = mapper->inode; 153 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 154 { 155 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 156 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cluster %x / cycle %d", 157 __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle ); 158 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name ); 159 } 160 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 161 { 162 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cluster %x / cycle %d", 163 __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle ); 164 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" ); 165 } 166 #endif 167 168 // allocate one 4 Kbytes page from the remote mapper cluster 169 page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 ); 170 171 if( page_ptr == NULL ) 172 { 173 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n", 174 __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy ); 175 return -1; 176 } 177 178 // build extended pointer on new page descriptor 179 xptr_t page_xp = XPTR( mapper_cxy , page_ptr ); 180 181 // initialize the page descriptor 182 page_remote_init( page_xp ); 183 184 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1 ); 185 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index ) , page_id ); 186 hal_remote_spt( XPTR( mapper_cxy , &page_ptr->mapper ) , mapper_ptr ); 187 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->flags ) , PG_INIT ); 188 189 // insert page in mapper radix tree 190 error = grdxt_remote_insert( XPTR( mapper_cxy , &mapper_ptr->rt), 191 page_id, 192 page_ptr ); 193 194 if( error ) 195 { 196 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n", 197 __FUNCTION__ , this->process->pid, this->trdid ); 198 ppm_remote_free_pages( 
mapper_cxy , page_ptr ); 199 return -1; 200 } 201 202 // launch I/O operation to load page from IOC device to mapper 203 error = vfs_fs_move_page( page_xp , IOC_SYNC_READ ); 204 205 if( error ) 206 { 207 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 208 __FUNCTION__ , this->process->pid, this->trdid ); 209 mapper_remote_release_page( mapper_xp , page_ptr ); 210 return -1; 211 } 212 213 // return extended pointer on allocated page 214 *page_xp_ptr = page_xp; 215 216 #if DEBUG_MAPPER_HANDLE_MISS 217 cycle = (uint32_t)hal_get_cycles(); 218 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 219 { 220 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d", 221 __FUNCTION__, this->process->pid, this->trdid, 222 page_id, name, ppm_page2ppn( page_xp ), cycle ); 223 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name ); 224 } 225 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 226 { 227 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d", 228 __FUNCTION__, this->process->pid, this->trdid, 229 page_id, ppm_page2ppn( page_xp ), cycle ); 230 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" ); 231 } 232 #endif 233 234 return 0; 235 236 } // end mapper_remote_handle_miss() 136 237 137 238 //////////////////////////////////////////////////// … … 183 284 184 285 // test mapper miss 185 if( page_xp == XPTR_NULL ) // miss => try tohandle it286 if( page_xp == XPTR_NULL ) // miss => handle it 186 287 { 187 288 // release the lock in READ_MODE and take it in WRITE_MODE … … 196 297 if ( page_xp == XPTR_NULL ) // miss confirmed => handle it 197 298 { 198 199 if( mapper_cxy == local_cxy ) // mapper is local 200 { 201 202 #if (DEBUG_MAPPER_GET_PAGE & 1) 203 if( DEBUG_MAPPER_GET_PAGE < cycle ) 204 printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ ); 205 #endif 206 error = mapper_handle_miss( mapper_ptr, 207 page_id, 208 &page_xp ); 209 } 210 else 211 { 212 213 #if (DEBUG_MAPPER_GET_PAGE & 1) 214 if( DEBUG_MAPPER_GET_PAGE < cycle ) 215 printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ ); 216 #endif 217 rpc_mapper_handle_miss_client( mapper_cxy, 218 mapper_ptr, 219 page_id, 220 &page_xp, 221 &error ); 222 } 223 224 if ( error ) 299 error = mapper_remote_handle_miss( mapper_xp, 300 page_id, 301 &page_xp ); 302 if( error ) 225 303 { 226 304 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", … … 230 308 } 231 309 } 310 311 #if (DEBUG_MAPPER_GET_PAGE & 1) 312 if( DEBUG_MAPPER_GET_PAGE < cycle ) 313 printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n", 314 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) ); 315 #endif 232 316 233 317 // release mapper lock from WRITE_MODE … … 260 344 } // end mapper_remote_get_page() 261 345 262 ////////////////////////////////////////////// 263 error_t mapper_handle_miss( mapper_t * mapper, 264 uint32_t page_id, 265 xptr_t * page_xp ) 266 { 267 kmem_req_t req; 268 page_t * page; 269 error_t error; 270 271 thread_t * this = CURRENT_THREAD; 272 273 #if DEBUG_MAPPER_HANDLE_MISS 274 uint32_t cycle = (uint32_t)hal_get_cycles(); 275 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 276 vfs_inode_t * inode = mapper->inode; 277 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 278 { 279 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 280 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d", 281 __FUNCTION__, 
this->process->pid, this->trdid, page_id, name, cycle ); 282 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name ); 283 } 284 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 285 { 286 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d", 287 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); 288 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" ); 289 } 290 #endif 291 292 // allocate one page from the local cluster 293 req.type = KMEM_PAGE; 294 req.size = 0; 295 req.flags = AF_NONE; 296 page = kmem_alloc( &req ); 297 298 if( page == NULL ) 299 { 300 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n", 301 __FUNCTION__ , this->process->pid, this->trdid , local_cxy ); 302 return -1; 303 } 304 305 // initialize the page descriptor 306 page_init( page ); 307 page_set_flag( page , PG_INIT ); 308 page_refcount_up( page ); 309 page->mapper = mapper; 310 page->index = page_id; 311 312 // insert page in mapper radix tree 313 error = grdxt_insert( &mapper->rt , page_id , page ); 314 315 if( error ) 316 { 317 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n", 318 __FUNCTION__ , this->process->pid, this->trdid ); 319 mapper_release_page( mapper , page ); 320 req.ptr = page; 321 req.type = KMEM_PAGE; 322 kmem_free(&req); 323 return -1; 324 } 325 326 // launch I/O operation to load page from IOC device to mapper 327 error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ ); 328 329 if( error ) 330 { 331 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 332 __FUNCTION__ , this->process->pid, this->trdid ); 333 mapper_release_page( mapper , page ); 334 req.ptr = page; 335 req.type = KMEM_PAGE; 336 kmem_free( &req ); 337 return -1; 338 } 339 340 // set extended pointer on allocated page 341 *page_xp = XPTR( local_cxy , page ); 342 343 #if DEBUG_MAPPER_HANDLE_MISS 344 cycle = (uint32_t)hal_get_cycles(); 345 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 346 { 347 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d", 348 __FUNCTION__, this->process->pid, this->trdid, 349 page_id, name, ppm_page2ppn( *page_xp ), cycle ); 350 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name ); 351 } 352 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 353 { 354 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d", 355 __FUNCTION__, this->process->pid, this->trdid, 356 page_id, ppm_page2ppn( *page_xp ), cycle ); 357 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" ); 358 } 359 #endif 360 361 return 0; 362 363 } // end mapper_handle_miss() 364 365 //////////////////////////////////////////// 366 void mapper_release_page( mapper_t * mapper, 367 page_t * page ) 368 { 346 //////////////////////////////////////////////////// 347 void mapper_remote_release_page( xptr_t mapper_xp, 348 page_t * page ) 349 { 350 // get mapper cluster an local pointer 351 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 352 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 353 369 354 // build extended pointer on mapper lock 370 xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );355 xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock ); 371 356 372 357 // take mapper lock in WRITE_MODE 373 remote_rwlock_wr_acquire( mapper_lock_xp );358 remote_rwlock_wr_acquire( lock_xp ); 374 359 375 360 // remove physical page from radix tree 376 
grdxt_remo ve( &mapper->rt, page->index );361 grdxt_remote_remove( XPTR( mapper_cxy , &mapper_ptr->rt ) , page->index ); 377 362 378 363 // release mapper lock from WRITE_MODE 379 remote_rwlock_wr_release( mapper_lock_xp );364 remote_rwlock_wr_release( lock_xp ); 380 365 381 366 // release page to PPM 382 kmem_req_t req; 383 req.type = KMEM_PAGE; 384 req.ptr = page; 385 kmem_free( &req ); 386 367 ppm_remote_free_pages( mapper_cxy , page ); 368 387 369 } // end mapper_release_page() 388 370 -
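Editor's note: a condensed sketch of how the new miss handler is meant to be invoked; it mirrors the call made from mapper_remote_get_page() in the hunk above, and assumes the caller already holds the mapper lock in WRITE_MODE as required by the WARNING in mapper.h (<mapper_xp> and <page_id> are illustrative variables):

    xptr_t  page_xp;
    error_t error;

    error = mapper_remote_handle_miss( mapper_xp,    // extended pointer on mapper
                                       page_id,      // missing page index in file
                                       &page_xp );   // [out] extended pointer on page
    if( error )
    {
        // on failure the handler has already removed the page and released it to the PPM
        return -1;
    }

    // on success the page is registered in the remote mapper radix tree and up to date;
    // the physical page number can then be obtained with ppm_page2ppn( page_xp )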
trunk/kernel/mm/mapper.h
r628 r635 64 64 * TODO (1) the mapper being only used to implement the VFS cache(s), the mapper.c 65 65 * and mapper.h file should be trandfered to the fs directory. 66 * TODO (2) the "type" field i s probably unused...66 * TODO (2) the "type" field in mapper descriptor is redundant and probably unused. 67 67 ******************************************************************************************/ 68 68 … … 106 106 107 107 /******************************************************************************************* 108 * This function load from device a missing page identified by the <page_id> argument109 * into the mapper identified by the <mapper> local pointer.110 * It allocates a physical page from the local cluster, initialise by accessing device,111 * and register the page in the mapper radix tree.112 * It must be executed by a thread running in the cluster containing the mapper.108 * This function load from the IOC device a missing page identified by the <page_id> 109 * argument into a - possibly remote - mapper identified by the <mapper_xp> argument. 110 * It can be executed by a thread running in any cluster. 111 * It allocates a physical page from the remote cluster PPM, initialises it by accessing 112 * the IOC device, and registers the page in the remote mapper radix tree. 113 113 * WARNING : the calling function mapper_remote_get_page() is supposed to take and release 114 114 * the lock protecting the mapper in WRITE_MODE. 115 115 ******************************************************************************************* 116 * @ mapper : [in] target mapper. 117 * @ page_id : [in] missing page index in file. 118 * @ page_xp : [out] buffer for extended pointer on missing page descriptor. 119 * @ return 0 if success / return -1 if a dirty page cannot be updated on device. 120 ******************************************************************************************/ 121 error_t mapper_handle_miss( mapper_t * mapper, 122 uint32_t page_id, 123 xptr_t * page_xp ); 116 * @ mapper_xp : [in] extended pointer on remote mapper. 117 * @ page_id : [in] missing page index in file. 118 * @ page_xp : [out] buffer for extended pointer on missing page descriptor. 119 * @ return 0 if success / return -1 if IOC cannot be accessed. 120 ******************************************************************************************/ 121 error_t mapper_remote_handle_miss( xptr_t mapper_xp, 122 uint32_t page_id, 123 xptr_t * page_xp ); 124 125 /******************************************************************************************* 126 * This function removes a physical page from a - possibly remote - mapper, 127 * and releases the page to the remote PPM. 128 * It can be executed by any thread running in any cluster. 129 * It takes the mapper lock in WRITE_MODE to update the mapper. 130 ******************************************************************************************* 131 * @ mapper : extended pointer on the remote mapper. 132 * @ page : local pointer on the page in remote mapper. 133 ******************************************************************************************/ 134 void mapper_remote_release_page( xptr_t mapper_xp, 135 struct page_s * page ); 124 136 125 137 /******************************************************************************************* … … 170 182 171 183 /******************************************************************************************* 172 * This function removes a physical page from the mapper, and releases173 * the page to the local PPM. 
It is called by the mapper_destroy() function.174 * It must be executed by a thread running in the cluster containing the mapper.175 * It takes the mapper lock in WRITE_MODE to update the mapper.176 *******************************************************************************************177 * @ mapper : local pointer on the mapper.178 * @ page : pointer on page to remove.179 ******************************************************************************************/180 void mapper_release_page( mapper_t * mapper,181 struct page_s * page );182 183 /*******************************************************************************************184 184 * This function returns an extended pointer on a page descriptor. 185 185 * The - possibly remote - mapper is identified by the <mapper_xp> argument. … … 237 237 /******************************************************************************************* 238 238 * This function scan all pages present in the mapper identified by the <mapper> argument, 239 * and synchronize all pages ma ked asdirty" on disk.239 * and synchronize all pages marked as "dirty" on disk. 240 240 * These pages are unmarked and removed from the local PPM dirty_list. 241 241 * This function must be called by a local thread running in same cluster as the mapper. -
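Editor's note: a matching sketch for the release side, as used by mapper_destroy() in the mapper.c hunk above; the function acquires the mapper lock itself, so the caller presumably must not already hold it:

    // <mapper> : local pointer on a mapper in this cluster
    // <page>   : local pointer on one of its registered pages
    mapper_remote_release_page( XPTR( local_cxy , mapper ) , page );
    // the page is removed from the radix tree and returned to the PPM of that cluster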
trunk/kernel/mm/page.c
r634 r635 46 46 47 47 remote_busylock_init( XPTR( local_cxy , &page->lock ), LOCK_PAGE_STATE ); 48 49 list_entry_init( &page->list );50 48 } 51 49 … … 93 91 } 94 92 93 94 95 96 /////////////////////////////////////////////// 97 inline void page_remote_init( xptr_t page_xp ) 98 { 99 hal_remote_memset( page_xp , 0 , sizeof(page_t) ); 100 101 cxy_t page_cxy = GET_CXY( page_xp ); 102 page_t * page_ptr = GET_PTR( page_xp ); 103 104 remote_busylock_init( XPTR( page_cxy , &page_ptr->lock ), LOCK_PAGE_STATE ); 105 } 95 106 96 107 //////////////////////////////////////////////////// -
trunk/kernel/mm/page.h
r632 r635 48 48 * This structure defines a physical page descriptor. 49 49 * - The remote_busylock is used to allows any remote thread to atomically 50 * test/modify the forks counter or the pageflags.50 * test/modify the forks counter or the flags. 51 51 * - The list entry is used to register the page in a free list or in dirty list. 52 52 * The refcount is used for page release to KMEM. … 133 133 134 134 135 136 /************************************************************************************* 137 * This function can be called by any thread running in any cluster. 138 * It initializes the page descriptor identified by the <page_xp> argument. 139 ************************************************************************************* 140 * @ page_xp : extended pointer to page descriptor. 141 ************************************************************************************/ 142 inline void page_remote_init( xptr_t page_xp ); 143 135 144 /************************************************************************************* 136 145 * This function can be called by any thread running in any cluster. -
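Editor's note: a short sketch of the intended call sequence for a page descriptor located in another cluster, following the code added to mapper_remote_handle_miss() above (<cxy> and <page_id> are illustrative):

    page_t * page = ppm_remote_alloc_pages( cxy , 0 );

    if( page != NULL )
    {
        xptr_t page_xp = XPTR( cxy , page );

        // reset the remote descriptor and initialize its busylock
        page_remote_init( page_xp );

        // then set individual fields through remote accesses
        hal_remote_s32( XPTR( cxy , &page->refcount ) , 1 );
        hal_remote_s32( XPTR( cxy , &page->index    ) , page_id );
    }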
trunk/kernel/mm/ppm.c
r634 r635 212 212 page_t * found_block; 213 213 214 thread_t * this = CURRENT_THREAD; 215 214 216 #if DEBUG_PPM_ALLOC_PAGES 215 thread_t * this = CURRENT_THREAD;216 217 uint32_t cycle = (uint32_t)hal_get_cycles(); 217 218 if( DEBUG_PPM_ALLOC_PAGES < cycle ) … … 237 238 238 239 current_block = NULL; 239 240 // find a free block equal or larger to requested size 241 for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ ) 242 { 243 if( !list_is_empty( &ppm->free_pages_root[current_order] ) ) 240 current_order = order; 241 242 // search a free block equal or larger than requested size 243 while( current_order < CONFIG_PPM_MAX_ORDER ) 244 { 245 // get local pointer on the root of relevant free_list (same in all clusters) 246 list_entry_t * root = &ppm->free_pages_root[current_order]; 247 248 if( !list_is_empty( root ) ) 244 249 { 245 250 // get first free block in this free_list 246 current_block = LIST_FIRST( &ppm->free_pages_root[current_order], page_t , list );251 current_block = LIST_FIRST( root , page_t , list ); 247 252 248 253 // remove this block from this free_list 249 254 list_unlink( ¤t_block->list ); 255 ppm->free_pages_nr[current_order] --; 250 256 251 257 // register pointer on found block 252 258 found_block = current_block; 253 259 254 // update this free-list number of blocks255 ppm->free_pages_nr[current_order] --;256 257 260 // compute found block size 258 261 current_size = (1 << current_order); … … 260 263 break; 261 264 } 265 266 // increment loop index 267 current_order++; 262 268 } 263 269 … … 267 273 remote_busylock_release( lock_xp ); 268 274 269 #if DEBUG_PPM_ALLOC_PAGES 270 cycle = (uint32_t)hal_get_cycles(); 271 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 272 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 273 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle ); 274 #endif 275 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n", 276 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy ); 275 277 276 278 return NULL; … … 282 284 while( current_order > order ) 283 285 { 286 // update size and order 284 287 current_order --; 285 286 // update pointer, size, and order fiels for new free block287 288 current_size >>= 1; 289 290 // update order fiels in new free block 288 291 current_block = found_block + current_size; 289 292 current_block->order = current_order; … … 291 294 // insert new free block in relevant free_list 292 295 list_add_first( &ppm->free_pages_root[current_order] , ¤t_block->list ); 293 294 // update number of blocks in free list295 296 ppm->free_pages_nr[current_order] ++; 296 297 } … … 312 313 printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n", 313 314 __FUNCTION__, this->process->pid, this->trdid, 314 1<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), local_cxy, cycle );315 1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle ); 315 316 #endif 316 317 … … 374 375 375 376 ///////////////////////////////////////////// 376 xptr_tppm_remote_alloc_pages( cxy_t cxy,377 void * ppm_remote_alloc_pages( cxy_t cxy, 377 378 uint32_t order ) 378 379 { … … 382 383 page_t * found_block; 383 384 385 thread_t * this = CURRENT_THREAD; 386 384 387 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 385 thread_t * this = CURRENT_THREAD;386 388 uint32_t cycle = (uint32_t)hal_get_cycles(); 387 389 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle ) … … 408 410 409 411 current_block = NULL; 410 411 // find in remote 
cluster a free block equal or larger to requested size 412 for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ ) 413 { 414 // get local pointer on the root of relevant free_list in remote cluster 412 current_order = order; 413 414 // search a free block equal or larger than requested size 415 while( current_order < CONFIG_PPM_MAX_ORDER ) 416 { 417 // get local pointer on the root of relevant free_list (same in all clusters) 415 418 list_entry_t * root = &ppm->free_pages_root[current_order]; 416 419 417 if( !list_remote_is_empty( cxy , root ) ) 420 if( !list_remote_is_empty( cxy , root ) ) // list non empty => success 418 421 { 419 422 // get local pointer on first free page descriptor in remote cluster … … 422 425 // remove first free page from the free-list in remote cluster 423 426 list_remote_unlink( cxy , ¤t_block->list ); 427 hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 ); 424 428 425 429 // register found block 426 430 found_block = current_block; 427 431 428 // decrement relevant free-list number of items in remote cluster429 hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );430 431 432 // compute found block size 432 433 current_size = (1 << current_order); … … 434 435 break; 435 436 } 437 438 // increment loop index 439 current_order++; 436 440 } 437 441 … … 441 445 remote_busylock_release( lock_xp ); 442 446 443 #if DEBUG_REMOTE_PPM_ALLOC_PAGES 444 cycle = (uint32_t)hal_get_cycles(); 445 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle ) 446 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 447 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 448 #endif 447 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n", 448 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy ); 449 449 450 450 return XPTR_NULL; … … 455 455 while( current_order > order ) 456 456 { 457 // update order , size, and local pointer for new free block457 // update order and size 458 458 current_order --; 459 459 current_size >>= 1; 460 461 // update new free block order field in remote cluster 460 462 current_block = found_block + current_size; 461 462 // update new free block order field in remote cluster463 463 hal_remote_s32( XPTR( cxy , ¤t_block->order ) , current_order ); 464 464 … … 497 497 #endif 498 498 499 return XPTR( cxy , found_block );499 return found_block; 500 500 501 501 } // end ppm_remote_alloc_pages() -
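Editor's note: since ppm_remote_alloc_pages() now returns a local pointer on the remote page descriptor, callers build the extended pointer themselves. A minimal sketch of a remote allocation / release cycle (the memset is only there to show a remote access to the buffer):

    page_t * page = ppm_remote_alloc_pages( cxy , 2 );   // 4 contiguous pages

    if( page != NULL )
    {
        xptr_t page_xp = XPTR( cxy , page );

        // buffer base and physical page number in the remote cluster
        xptr_t base_xp = ppm_page2base( page_xp );
        printk("allocated ppn %x in cluster %x\n", ppm_page2ppn( page_xp ), cxy );

        // access the buffer remotely
        hal_remote_memset( base_xp , 0 , 4 * CONFIG_PPM_PAGE_SIZE );

        // return the block to the remote buddy allocator
        ppm_remote_free_pages( cxy , page );
    }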
trunk/kernel/mm/ppm.h
r632 r635 123 123 * @ cxy : remote cluster identifier. 124 124 * @ order : ln2( number of 4 Kbytes pages) 125 * @ returns a n extended pointer on the page descriptor if success / XPTR_NULL if error.126 ****************************************************************************************/ 127 xptr_tppm_remote_alloc_pages( cxy_t cxy,125 * @ returns a local pointer on remote page descriptor if success / NULL if error. 126 ****************************************************************************************/ 127 void * ppm_remote_alloc_pages( cxy_t cxy, 128 128 uint32_t order ); 129 129 -
trunk/kernel/mm/vmm.c
r634 r635 49 49 #include <hal_exception.h> 50 50 51 ////////////////////////////////////////////////////////////////////////////////// 51 //////////////////////////////////////////////////////////////////////////////////////////// 52 52 // Extern global variables 53 ////////////////////////////////////////////////////////////////////////////////// 53 //////////////////////////////////////////////////////////////////////////////////////////// 54 54 55 55 extern process_t process_zero; // allocated in cluster.c … … 286 286 } // end vmm_detach_from_vsl() 287 287 288 289 290 291 288 //////////////////////////////////////////// 292 289 error_t vmm_user_init( process_t * process ) 293 290 { 294 vseg_t * vseg_args;295 vseg_t * vseg_envs;296 intptr_t base;297 intptr_t size;298 291 uint32_t i; 299 292 … … 319 312 "STACK zone too small\n"); 320 313 314 // initialize the lock protecting the VSL 315 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 316 317 318 /* 321 319 // register "args" vseg in VSL 322 320 base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT; … … 358 356 359 357 vmm->envs_vpn_base = base; 360 358 */ 361 359 // initialize STACK allocator 362 360 vmm->stack_mgr.bitmap = 0; … … 375 373 376 374 // initialize instrumentation counters 377 vmm->pgfault_nr = 0; 375 vmm->false_pgfault_nr = 0; 376 vmm->local_pgfault_nr = 0; 377 vmm->global_pgfault_nr = 0; 378 vmm->false_pgfault_cost = 0; 379 vmm->local_pgfault_cost = 0; 380 vmm->global_pgfault_cost = 0; 378 381 379 382 hal_fence(); … … 398 401 399 402 #if DEBUG_VMM_USER_RESET 400 uint32_t cycle = (uint32_t)hal_get_cycles();403 uint32_t cycle; 401 404 thread_t * this = CURRENT_THREAD; 405 #endif 406 407 #if (DEBUG_VMM_USER_RESET & 1 ) 408 cycle = (uint32_t)hal_get_cycles(); 402 409 if( DEBUG_VMM_USER_RESET < cycle ) 403 410 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", … … 407 414 #if (DEBUG_VMM_USER_RESET & 1 ) 408 415 if( DEBUG_VMM_USER_RESET < cycle ) 409 hal_vmm_display( process, true );416 hal_vmm_display( XPTR( local_cxy , process ) , true ); 410 417 #endif 411 418 … … 478 485 #endif 479 486 487 #if (DEBUG_VMM_USER_RESET & 1 ) 488 if( DEBUG_VMM_USER_RESET < cycle ) 489 hal_vmm_display( XPTR( local_cxy , process ) , true ); 490 #endif 491 480 492 } // end vmm_user_reset() 481 493 … … 503 515 thread_t * this = CURRENT_THREAD; 504 516 if( DEBUG_VMM_UPDATE_PTE < cycle ) 505 printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n", 506 __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); 507 #endif 508 509 // check cluster is reference 510 assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n"); 517 printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / ycle %d\n", 518 __FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle ); 519 #endif 511 520 512 521 // get extended pointer on root of process copies xlist in owner cluster … … 517 526 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 518 527 528 // check local cluster is owner cluster 529 assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n"); 530 519 531 // loop on destination process copies 520 532 XLIST_FOREACH( process_root_xp , process_iter_xp ) … … 525 537 remote_process_cxy = GET_CXY( remote_process_xp ); 526 538 527 #if (DEBUG_VMM_UPDATE_PTE & 0x1)539 #if (DEBUG_VMM_UPDATE_PTE & 1) 528 540 if( DEBUG_VMM_UPDATE_PTE < cycle ) 529 printk("\n[%s] thread r[%x,%x] handling vpn %x for 
process %x in cluster %x\n",541 printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n", 530 542 __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); 531 543 #endif … … 545 557 #endif 546 558 559 #if (DEBUG_VMM_UPDATE_PTE & 1) 560 hal_vmm_display( process , true ); 561 #endif 562 547 563 } // end vmm_global_update_pte() 548 564 … … 570 586 cxy_t owner_cxy; 571 587 lpid_t owner_lpid; 588 589 // get target process PID 590 pid = process->pid; 572 591 573 592 #if DEBUG_VMM_SET_COW … … 576 595 if( DEBUG_VMM_SET_COW < cycle ) 577 596 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", 578 __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); 597 __FUNCTION__, this->process->pid, this->trdid, pid , cycle ); 598 #endif 599 600 #if (DEBUG_VMM_SET_COW & 1) 601 if( DEBUG_VMM_SET_COW < cycle ) 602 hal_vmm_display( process , true ); 579 603 #endif 580 604 581 605 // check cluster is reference 582 assert( ( GET_CXY( process->ref_xp ) == local_cxy),583 "local cluster is notprocess reference cluster\n");606 assert( (XPTR( local_cxy , process ) == process->ref_xp), 607 "local cluster must be process reference cluster\n"); 584 608 585 609 // get pointer on reference VMM … … 587 611 588 612 // get extended pointer on root of process copies xlist in owner cluster 589 pid = process->pid;590 613 owner_cxy = CXY_FROM_PID( pid ); 591 614 owner_lpid = LPID_FROM_PID( pid ); … … 596 619 vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 597 620 598 // loop on destinationprocess copies621 // loop on target process copies 599 622 XLIST_FOREACH( process_root_xp , process_iter_xp ) 600 623 { 601 // get cluster and local pointer on remote process 624 // get cluster and local pointer on remote process copy 602 625 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 603 626 remote_process_ptr = GET_PTR( remote_process_xp ); … … 606 629 #if (DEBUG_VMM_SET_COW & 1) 607 630 if( DEBUG_VMM_SET_COW < cycle ) 608 printk("\n[%s] thread[%x,%x] handlingprocess %x in cluster %x\n",609 __FUNCTION__, this->process->pid, this->trdid, process->pid, remote_process_cxy );631 printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n", 632 __FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy ); 610 633 #endif 611 634 … … 620 643 vseg = GET_PTR( vseg_xp ); 621 644 622 assert( (GET_CXY( vseg_xp ) == local_cxy) ,623 "all vsegs in reference VSL must be local\n" );624 625 645 // get vseg type, base and size 626 646 uint32_t type = vseg->type; … … 630 650 #if (DEBUG_VMM_SET_COW & 1) 631 651 if( DEBUG_VMM_SET_COW < cycle ) 632 printk("\n[%s] thread[%x,%x] handlingvseg %s / vpn_base = %x / vpn_size = %x\n",652 printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n", 633 653 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); 634 654 #endif … … 653 673 654 674 // atomically increment pending forks counter in physical pages, 655 // for all vseg pages that are mapped in reference cluster675 // this is only done once, when handling the reference copy 656 676 if( remote_process_cxy == local_cxy ) 657 677 { 678 679 #if (DEBUG_VMM_SET_COW & 1) 680 if( DEBUG_VMM_SET_COW < cycle ) 681 printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n", 682 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); 683 #endif 658 684 // scan all pages in vseg 659 685 for( vpn = vpn_base ; vpn < (vpn_base + 
vpn_size) ; vpn++ ) … … 684 710 } 685 711 } // end loop on vpn 712 713 #if (DEBUG_VMM_SET_COW & 1) 714 if( DEBUG_VMM_SET_COW < cycle ) 715 printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n", 716 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); 717 #endif 686 718 } // end if local 687 719 } // end if vseg type … … 713 745 vseg_t * child_vseg; 714 746 uint32_t type; 715 bool_t cow;716 747 vpn_t vpn; 717 748 vpn_t vpn_base; 718 749 vpn_t vpn_size; 719 xptr_t page_xp; // extended pointer on page descriptor720 page_t * page_ptr;721 cxy_t page_cxy;722 xptr_t forks_xp; // extended pointer on forks counter in page descriptor723 750 xptr_t parent_root_xp; 724 751 bool_t mapped; … … 740 767 parent_vmm = &parent_process->vmm; 741 768 child_vmm = &child_process->vmm; 742 743 // initialize the lock protecting the child VSL744 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );745 746 // initialize the child VSL as empty747 xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) );748 child_vmm->vsegs_nr = 0;749 750 // create an empty child GPT751 error = hal_gpt_create( &child_vmm->gpt );752 if( error )753 {754 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );755 return -1;756 }757 769 758 770 // build extended pointer on parent VSL root and lock … … 820 832 { 821 833 // activate the COW for DATA, ANON, REMOTE vsegs only 822 cow = ( type != VSEG_TYPE_FILE );834 // cow = ( type != VSEG_TYPE_FILE ); 823 835 824 836 vpn_base = child_vseg->vpn_base; … … 832 844 XPTR( parent_cxy , &parent_vmm->gpt ), 833 845 vpn, 834 cow,835 &ppn, 836 &mapped ); 846 false, // does not handle COW flag 847 &ppn, // unused 848 &mapped ); // unused 837 849 if( error ) 838 850 { … … 842 854 } 843 855 844 // increment pending forks counter in page if mapped845 if( mapped )846 {847 // get pointers and cluster on page descriptor848 page_xp = ppm_ppn2page( ppn );849 page_cxy = GET_CXY( page_xp );850 page_ptr = GET_PTR( page_xp );851 852 // get extended pointers on "forks" and "lock"853 forks_xp = XPTR( page_cxy , &page_ptr->forks );854 lock_xp = XPTR( page_cxy , &page_ptr->lock );855 856 // get lock protecting "forks" counter857 remote_busylock_acquire( lock_xp );858 859 // increment "forks"860 hal_remote_atomic_add( forks_xp , 1 );861 862 // release lock protecting "forks" counter863 remote_busylock_release( lock_xp );864 865 856 #if DEBUG_VMM_FORK_COPY 866 857 cycle = (uint32_t)hal_get_cycles(); … … 869 860 __FUNCTION__ , this->process->pid, this->trdid , vpn , cycle ); 870 861 #endif 871 }872 862 } 873 863 } // end if no code & no stack … … 877 867 // release the parent VSL lock in read mode 878 868 remote_rwlock_rd_release( parent_lock_xp ); 879 880 // update child VMM with kernel vsegs881 error = hal_vmm_kernel_update( child_process );882 883 if( error )884 {885 printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );886 return -1;887 }888 869 889 870 // initialize the child VMM STACK allocator … … 902 883 903 884 // initialize instrumentation counters 904 child_vmm->pgfault_nr = 0; 885 child_vmm->false_pgfault_nr = 0; 886 child_vmm->local_pgfault_nr = 0; 887 child_vmm->global_pgfault_nr = 0; 888 child_vmm->false_pgfault_cost = 0; 889 child_vmm->local_pgfault_cost = 0; 890 child_vmm->global_pgfault_cost = 0; 905 891 906 892 // copy base addresses from parent VMM to child VMM … … 933 919 934 920 #if DEBUG_VMM_DESTROY 935 uint32_t cycle = (uint32_t)hal_get_cycles();936 thread_t * this = 
CURRENT_THREAD;921 uint32_t cycle = (uint32_t)hal_get_cycles(); 922 thread_t * this = CURRENT_THREAD; 937 923 if( DEBUG_VMM_DESTROY < cycle ) 938 924 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", … … 942 928 #if (DEBUG_VMM_DESTROY & 1 ) 943 929 if( DEBUG_VMM_DESTROY < cycle ) 944 hal_vmm_display( process, true );930 hal_vmm_display( XPTR( local_cxy, process ) , true ); 945 931 #endif 946 932 … … 1062 1048 vseg_t * vmm_create_vseg( process_t * process, 1063 1049 vseg_type_t type, 1064 intptr_t base, 1050 intptr_t base, // ltid for VSEG_TYPE_STACK 1065 1051 uint32_t size, 1066 1052 uint32_t file_offset, … … 1074 1060 error_t error; 1075 1061 1076 #if DEBUG_VMM_CREATE_VSEG1062 #if (DEBUG_VMM_CREATE_VSEG & 1) 1077 1063 thread_t * this = CURRENT_THREAD; 1078 1064 uint32_t cycle = (uint32_t)hal_get_cycles(); 1079 1065 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1080 printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n", 1081 __FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle ); 1066 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n", 1067 __FUNCTION__, this->process->pid, this->trdid, 1068 process->pid, vseg_type_str(type), base, cxy, cycle ); 1082 1069 #endif 1083 1070 … … 1171 1158 } 1172 1159 1173 #if DEBUG_VMM_CREATE_VSEG1160 #if (DEBUG_VMM_CREATE_VSEG & 1) 1174 1161 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1175 1162 printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n", … … 1204 1191 cycle = (uint32_t)hal_get_cycles(); 1205 1192 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1206 printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n", 1207 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle ); 1193 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n", 1194 __FUNCTION__, this->process->pid, this->trdid, 1195 process->pid, vseg_type_str(type), base, cxy, cycle ); 1208 1196 #endif 1209 1197 … … 1685 1673 xptr_t page_xp; 1686 1674 cxy_t page_cxy; 1675 page_t * page_ptr; 1687 1676 uint32_t index; 1688 1677 … … 1711 1700 } 1712 1701 1713 // allocate a 4 Kbytes physical page from target cluster 1714 page_xp = ppm_remote_alloc_pages( page_cxy , 0 ); 1702 // allocate one small physical page from target cluster 1703 page_ptr = ppm_remote_alloc_pages( page_cxy , 0 ); 1704 1705 page_xp = XPTR( page_cxy , page_ptr ); 1715 1706 1716 1707 #if DEBUG_VMM_PAGE_ALLOCATE 1717 1708 cycle = (uint32_t)hal_get_cycles(); 1718 1709 if( DEBUG_VMM_PAGE_ALLOCATE < cycle ) 1719 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / c luster %x / cycle %d\n",1720 __FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), page_cxy,cycle );1710 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 1711 __FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle ); 1721 1712 #endif 1722 1713 … … 1741 1732 uint32_t cycle = (uint32_t)hal_get_cycles(); 1742 1733 thread_t * this = CURRENT_THREAD; 1743 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1744 if( vpn == 0x40B ) 1734 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1745 1735 printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", 1746 1736 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); … … 1769 1759 page_xp = vmm_page_allocate( vseg , vpn ); 1770 1760 1771 if( page_xp == XPTR_NULL ) return ENOMEM;1761 if( page_xp == XPTR_NULL ) return -1; 1772 
1762 1773 1763 // initialise missing page from .elf file mapper for DATA and CODE types … … 1788 1778 1789 1779 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1790 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1791 if( vpn == 0x40B ) 1780 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1792 1781 printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", 1793 1782 __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); … … 1803 1792 1804 1793 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1805 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1806 if( vpn == 0x40B ) 1794 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1807 1795 printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", 1808 1796 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 1821 1809 1822 1810 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1823 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1824 if( vpn == 0x40B ) 1811 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1825 1812 printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", 1826 1813 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 1839 1826 1840 1827 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1841 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1842 if( vpn == 0x40B ) 1828 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1843 1829 printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" 1844 1830 " %d bytes from mapper / %d bytes from BSS\n", … … 1874 1860 #if DEBUG_VMM_GET_ONE_PPN 1875 1861 cycle = (uint32_t)hal_get_cycles(); 1876 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1877 if( vpn == 0x40B ) 1878 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle\n", 1862 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1863 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 1879 1864 __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); 1880 1865 #endif … … 1906 1891 1907 1892 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1908 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1893 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1909 1894 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", 1910 1895 __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle ); … … 1912 1897 1913 1898 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1914 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1915 hal_vmm_display( this->process , false );1899 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1900 hal_vmm_display( this->process , true ); 1916 1901 #endif 1917 1902 … … 1928 1913 } 1929 1914 1930 #if DEBUG_VMM_HANDLE_PAGE_FAULT1931 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1915 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1916 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1932 1917 printk("\n[%s] thread[%x,%x] found vseg %s\n", 1933 1918 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) ); … … 1950 1935 } 1951 1936 1952 #if DEBUG_VMM_HANDLE_PAGE_FAULT1953 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1954 printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x / cycle %d\n",1937 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1938 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1939 printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n", 1955 1940 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy ); 1956 1941 #endif … … 1970 1955 { 1971 1956 1972 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1973 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) ) 1974 printk("\n[%s] thread[%x,%x] access local gpt : 
cxy %x / ref_cxy %x / type %s\n", 1975 __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) ); 1957 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1958 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1959 printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n", 1960 __FUNCTION__, this->process->pid, this->trdid, 1961 local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() ); 1976 1962 #endif 1977 1963 // allocate and initialise a physical page … … 2008 1994 2009 1995 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2010 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1996 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2011 1997 printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n", 2012 1998 __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); … … 2026 2012 { 2027 2013 2028 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2029 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) ) 2030 printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s\n", 2031 __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) ); 2014 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2015 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2016 printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n", 2017 __FUNCTION__, this->process->pid, this->trdid, 2018 local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() ); 2032 2019 #endif 2033 2020 // build extended pointer on reference GPT … … 2050 2037 } 2051 2038 2052 #if DEBUG_VMM_HANDLE_PAGE_FAULT2053 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2039 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2040 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2054 2041 printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n", 2055 2042 __FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn ); … … 2065 2052 ref_ppn ); 2066 2053 2067 #if DEBUG_VMM_HANDLE_PAGE_FAULT2068 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2054 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2055 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2069 2056 printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n", 2070 2057 __FUNCTION__, this->process->pid, this->trdid ); … … 2074 2061 hal_gpt_unlock_pte( ref_gpt_xp, vpn ); 2075 2062 2076 #if DEBUG_VMM_HANDLE_PAGE_FAULT2077 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2063 #if (DEBUG_VMM_HANDLE_PAGE_FAULT &1) 2064 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2078 2065 printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n", 2079 2066 __FUNCTION__, this->process->pid, this->trdid ); … … 2085 2072 2086 2073 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2087 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2074 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2088 2075 printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n", 2089 2076 __FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle ); … … 2120 2107 if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE; 2121 2108 2122 #if DEBUG_VMM_HANDLE_PAGE_FAULT2123 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2109 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2110 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2124 2111 
printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n", 2125 2112 __FUNCTION__, this->process->pid, this->trdid ); … … 2132 2119 ppn ); 2133 2120 2134 #if DEBUG_VMM_HANDLE_PAGE_FAULT2135 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2121 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2122 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2136 2123 printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n", 2137 2124 __FUNCTION__, this->process->pid, this->trdid ); … … 2150 2137 2151 2138 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2152 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2139 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2153 2140 printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n", 2154 2141 __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); … … 2173 2160 2174 2161 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2175 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2162 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2176 2163 printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n", 2177 2164 __FUNCTION__, vpn, ppn, attr, end_cycle ); … … 2212 2199 #endif 2213 2200 2214 #if ( DEBUG_VMM_HANDLE_PAGE_FAULT & 1)2201 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3 ) 2215 2202 hal_vmm_display( process , true ); 2216 2203 #endif … … 2352 2339 #if(DEBUG_VMM_HANDLE_COW & 1) 2353 2340 if( DEBUG_VMM_HANDLE_COW < cycle ) 2354 printk("\n[%s] thread[%x,%x] 2341 printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n", 2355 2342 __FUNCTION__, this->process->pid, this->trdid, old_ppn ); 2356 2343 #endif … … 2360 2347 // build new_attr : set WRITABLE, reset COW, reset LOCKED 2361 2348 new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED)); 2349 2350 #if(DEBUG_VMM_HANDLE_COW & 1) 2351 if( DEBUG_VMM_HANDLE_COW < cycle ) 2352 printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n", 2353 __FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn ); 2354 #endif 2362 2355 2363 2356 // update the relevant GPT(s) … … 2366 2359 if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) 2367 2360 { 2368 // set the new PTE22361 // set new PTE in local gpt 2369 2362 hal_gpt_set_pte( gpt_xp, 2370 2363 vpn, … … 2398 2391 #endif 2399 2392 2393 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3) 2394 hal_vmm_display( process , true ); 2395 #endif 2396 2400 2397 return EXCP_NON_FATAL; 2401 2398 -
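Editor's note: the single pgfault_nr counter is replaced by six per-VMM counters (number and cumulated cost for false, local and global page faults). A sketch of how a fault path can feed them, assuming the cycle counter is sampled at handler entry and exit as the handler above already does; synchronization between threads updating the same VMM is not shown:

    thread_t * this = CURRENT_THREAD;

    uint32_t start_cycle = (uint32_t)hal_get_cycles();

    // ... fault handling ...

    uint32_t end_cycle = (uint32_t)hal_get_cycles();

    vmm_t * vmm = &this->process->vmm;

    // example for a false page fault (the PTE was already mapped in the reference GPT)
    vmm->false_pgfault_nr   += 1;
    vmm->false_pgfault_cost += (end_cycle - start_cycle);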
trunk/kernel/mm/vmm.h
r632 r635
52 52 * - The allocator checks that the requested slot has not been already allocated, and set the
53 53 * corresponding bit in the bitmap.
54 * - The de-allocator function reset the corresponding bit in the bitmap.
54 * - The de-allocator reset the corresponding bit in the bitmap.
55 55 ********************************************************************************************/
56 56 
… …
112 112 typedef struct vmm_s
113 113 {
114 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL */
115 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference) */
116 uint32_t vsegs_nr; /*! total number of local vsegs */
117 
118 gpt_t gpt; /*! Generic Page Table (complete in reference) */
119 
120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */
121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */
122 
123 uint32_t pgfault_nr; /*! page fault counter (instrumentation) */
124 
125 vpn_t args_vpn_base; /*! args vseg first page */
126 vpn_t envs_vpn_base; /*! envs vseg first page */
127 vpn_t code_vpn_base; /*! code vseg first page */
128 vpn_t data_vpn_base; /*! data vseg first page */
129 vpn_t heap_vpn_base; /*! heap zone first page */
130 
131 intptr_t entry_point; /*! main thread entry point */
114 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL */
115 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference) */
116 uint32_t vsegs_nr; /*! total number of local vsegs */
117 
118 gpt_t gpt; /*! Generic Page Table (complete in reference) */
119 
120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */
121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */
122 
123 uint32_t false_pgfault_nr; /*! false page fault counter (for all threads) */
124 uint32_t local_pgfault_nr; /*! local page fault counter (for all threads) */
125 uint32_t global_pgfault_nr; /*! global page fault counter (for all threads) */
126 uint32_t false_pgfault_cost; /*! cumulated cost (for all threads) */
127 uint32_t local_pgfault_cost; /*! cumulated cost (for all threads) */
128 uint32_t global_pgfault_cost; /*! cumulated cost (for all threads) */
129 
130 vpn_t args_vpn_base; /*! args vseg first page */
131 vpn_t envs_vpn_base; /*! envs vseg first page */
132 vpn_t code_vpn_base; /*! code vseg first page */
133 vpn_t data_vpn_base; /*! data vseg first page */
134 vpn_t heap_vpn_base; /*! heap zone first page */
135 
136 intptr_t entry_point; /*! main thread entry point */
132 137 }
133 138 vmm_t;
134 139 
135 140 /*********************************************************************************************
136 * This function mkkes a partial initialisation of the VMM attached to an user process.
137 * The GPT must have been previously created, with the hal_gpt_create() function.
138 * - It registers "args", "envs" vsegs in the VSL.
139 * - It initializes the STACK and MMAP allocators.
140 * Note:
141 * This function makes only a partial initialisation of the VMM attached to an user
142 * process: It initializes the STACK and MMAP allocators, and the VSL lock.
143 * - The GPT has been previously created, with the hal_gpt_create() function.
144 * - The "kernel" vsegs are previously registered, by the hal_vmm_kernel_update() function.
141 145 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
142 146 * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
… …
165 169 * This function is called by the process_make_fork() function. It partially copies
166 170 * the content of a remote parent process VMM to the local child process VMM:
171 * - The KERNEL vsegs required by the architecture must have been previously
172 * created in the child VMM, using the hal_vmm_kernel_update() function.
167 173 * - The DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the
168 * child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the
169 * WRITABLE flag is reset and the COW flag is set.
174 * child VSL. All valid PTEs in parent GPT are copied to the child GPT.
175 * The WRITABLE and COW flags are not modified, as it will be done later for those
176 * shared pages by the vmm_set_cow() function.
170 177 * - The CODE vsegs registered in the parent VSL are registered in the child VSL, but the
171 178 * GPT entries are not copied in the child GPT, and will be dynamically updated from
… …
173 180 * - The FILE vsegs registered in the parent VSL are registered in the child VSL, and all
174 181 * valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set.
175 * - No STACK vseg is copied from parent VMM to child VMM, because the child stack vseg
176 * must be copied later from the cluster containing the user thread requesting the fork().
177 * - The KERNEL vsegs required by the target architecture are re-created in the child
178 * VMM, from the local kernel process VMM, using the hal_vmm_kernel_update() function.
182 * - No STACK vseg is copied from parent VMM to child VMM: the child stack vseg is copied
183 * later from the cluster containing the user thread requesting the fork().
179 184 *********************************************************************************************
180 185 * @ child_process : local pointer on local child process descriptor.
… …
203 208 * This function modifies one GPT entry identified by the <process> and <vpn> arguments
204 209 * in all clusters containing a process copy. It is used to maintain coherence in GPT
205 * copies, using the list of copies stored in the owner process, and remote_write accesses.
210 * copies, using remote_write accesses.
206 211 * It must be called by a thread running in the process owner cluster.
207 212 * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required.
… …
248 253 * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments,
249 254 * but uses the specific MMAP virtual memory allocator.
250 * - For the STACK type, it does not use the <size> argument, and the <base> argument
251 * defines the user thread LTID used by the specific STACK virtual memory allocator.
252 * It checks collision with all pre-existing vsegs.
255 * - For the STACK type, it does not use the <base> and <size> arguments, but uses the
256 * and the <base> argument the specific STACK virtual memory allocator.
257 * It checks collision with pre-existing vsegs.
253 258 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT,
254 259 * and does not allocate physical memory for vseg data.
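The six new counters added to vmm_t replace the single pgfault_nr field and distinguish false, local and global page faults, together with their cumulated cost in cycles. A minimal sketch of how a page-fault handler could feed them is given below; the field names come from the diff, while the vmm pointer, the start_cycle/end_cycle samples (assumed taken with hal_get_cycles()) and the classification booleans are illustrative assumptions.

    // Sketch only - one plausible way to update the new vmm_t counters.
    // 'vmm' is the faulting process VMM; 'false_pgfault' and 'local_pgfault'
    // are assumed booleans computed by the page-fault handler itself.
    uint32_t cost = (uint32_t)(end_cycle - start_cycle);

    if( false_pgfault )                    // fault already solved by another thread
    {
        vmm->false_pgfault_nr   += 1;
        vmm->false_pgfault_cost += cost;
    }
    else if( local_pgfault )               // fault solved in the local cluster
    {
        vmm->local_pgfault_nr   += 1;
        vmm->local_pgfault_cost += cost;
    }
    else                                   // fault solved in the reference cluster
    {
        vmm->global_pgfault_nr   += 1;
        vmm->global_pgfault_cost += cost;
    }

Since these counters are shared by all threads of the process, concurrent faults may require atomic increments (e.g. hal_atomic_add()) rather than the plain additions shown in this sketch.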
-
trunk/kernel/mm/vseg.c
r625 r635
2 2 * vseg.c - virtual segment (vseg) related operations
3 3 *
4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012)
5 * Mohamed Lamine Karaoui (2015)
6 * Alain Greiner (2016,2017,2018,2019)
4 * Authors Alain Greiner (2016,2017,2018,2019)
7 5 *
8 6 * Copyright (c) UPMC Sorbonne Universites
… …
66 64 kmem_req_t req;
67 65 
68 req.type = KMEM_VSEG;
69 req.size = sizeof(vseg_t);
70 req.flags = AF_KERNEL;
71 
72 return (vseg_t *)kmem_alloc( &req );
66 req.type = KMEM_KCM;
67 req.order = bits_log2( sizeof(vseg_t) );
68 req.flags = AF_KERNEL | AF_ZERO;
69 
70 return kmem_alloc( &req );
73 71 }
74 72 
… …
78 76 kmem_req_t req;
79 77 
80 req.type = KMEM_VSEG;
78 req.type = KMEM_KCM;
81 79 req.ptr = vseg;
82 80 kmem_free( &req );
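The vseg_alloc() / vseg_free() hunks show the switch from the dedicated KMEM_VSEG request type to the generic KMEM_KCM allocator, where the block size is passed as a power-of-two order. A minimal sketch of the same pattern applied to an arbitrary kernel object follows; my_object_t is a placeholder type, and only the kmem_req_t fields actually used in the diff (type, order, flags, ptr) are assumed.

    // Sketch only - generic KCM allocation / release following the new pattern.
    kmem_req_t req;

    req.type  = KMEM_KCM;                            // generic KCM allocator
    req.order = bits_log2( sizeof(my_object_t) );    // requested size as log2
    req.flags = AF_KERNEL | AF_ZERO;                 // kernel object, zero-filled

    my_object_t * obj = kmem_alloc( &req );          // NULL if allocation failed

    if( obj != NULL )
    {
        // ... use the object ...

        req.type = KMEM_KCM;
        req.ptr  = obj;
        kmem_free( &req );
    }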