Changeset 20

- Timestamp: Jun 3, 2017, 6:34:20 PM (8 years ago)
- Location:  trunk/kernel/mm
- Files:     2 edited
trunk/kernel/mm/kcm.c (r18 → r20)
                           kcm_page_t * page )
{
    assert( page->active , __FUNCTION__ , "kcm page should be active" );

    // get first block available
    int32_t index = bitmap_ffs( page->bitmap , kcm->blocks_nr );

    assert( (index != -1) , __FUNCTION__ , "kcm page should not be full" );

    // allocate block
    bitmap_clear( page->bitmap , index );

    // increase page refcount
    page->refcount ++;

    // change the page to busy if no more free block in page
    if( page->refcount >= kcm->blocks_nr )
    {
        page->active = 0;
        list_unlink( &page->list );
        kcm->active_pages_nr --;
…
        list_add_first( &kcm->busy_root , &page->list );
        kcm->busy_pages_nr ++;
        page->busy = 1;
    }

    return (page->base + index * kcm->block_size);
…
{
    kcm_page_t * page;
    uint32_t     index;

    page = (kcm_page_t *)((intptr_t)ptr & CONFIG_PPM_PAGE_MASK);
…
    page->refcount --;

    // change the page to active if it was busy
    if( page->busy )
    {
…
        list_add_last( &kcm->active_root , &page->list );
        kcm->active_pages_nr ++;
        page->active = 1;
    }

    // change the page to free if last block in active page
    if( (page->active) && (page->refcount == 0) )
    {
        page->active = 0;
        list_unlink( &page->list );
        kcm->active_pages_nr --;
…
    page_t     * page;
    kcm_page_t * ptr;
    kmem_req_t   req;

    // get one page from local PPM
    req.type  = KMEM_PAGE;
    req.size  = 0;
    req.flags = AF_KERNEL;
    page      = kmem_alloc( &req );

    if( page == NULL )
    {
        printk("\n[ERROR] in %s : failed to allocate page in cluster %d\n",
               __FUNCTION__ , local_cxy );
        return ENOMEM;
    }

    // get page base address
    ptr = ppm_page2base( page );

    // initialize KCM-page descriptor
    bitmap_set_range( ptr->bitmap , 0 , kcm->blocks_nr );
…
    ptr->page = page;

    // introduce new page in free-list
    list_add_first( &kcm->free_root , &ptr->list );
    kcm->free_pages_nr ++;
…
/////////////////////////////////////////////////////////////////////////////////////
// This private function gets one KCM page from the KCM freelist.
// It populates the freelist if required.
/////////////////////////////////////////////////////////////////////////////////////
…
    kcm_page_t * page;

    // get a new page from PPM if freelist empty
    if( kcm->free_pages_nr == 0 )
    {
        error = freelist_populate( kcm );
        if( error ) return NULL;
    }

    // get first KCM page from freelist and change its status to active
    page = LIST_FIRST( &kcm->free_root , kcm_page_t , list );
    list_unlink( &page->list );
…
    uint32_t     remaining;

    // initialize lock
    spinlock_init( &kcm->lock );

    // initialize KCM type
    kcm->type = type;

    // initialize KCM page lists
    kcm->free_pages_nr = 0;
    kcm->busy_pages_nr = 0;
…
    list_root_init( &kcm->active_root );

    // initialize block size and number of blocks per page
    block_size = ARROUND_UP( kmem_type_size( type ) , 64 );
    blocks_nr  = CONFIG_PPM_PAGE_SIZE / block_size;
…
    kcm->block_size = block_size;

    kcm_dmsg("\n[INFO] %s : KCM %s initialized / block_size = %d / blocks_nr = %d\n",
             __FUNCTION__ , kmem_type_str( type ) , block_size , blocks_nr );

}  // kcm_init()
…
    list_entry_t * iter;

    // get KCM lock
    spinlock_lock( &kcm->lock );

    // release all free pages
    LIST_FOREACH( &kcm->free_root , iter )
    {
…
    }

    // release all active pages
    LIST_FOREACH( &kcm->active_root , iter )
    {
…
    }

    // release all busy pages
    LIST_FOREACH( &kcm->busy_root , iter )
    {
…
    }

    // release KCM lock
    spinlock_unlock( &kcm->lock );

}  // kcm_destroy()
…
    void * ptr = NULL;   // pointer on block

    // get lock
    spinlock_lock( &kcm->lock );

    // get an active page
    if( list_is_empty( &kcm->active_root ) )   // no active page => get one
    {
        kcm_dmsg("\n[INFO] %s : enters for type %s but no active page => get one\n",
                 __FUNCTION__ , kmem_type_str( kcm->type ) );

        // get a page from free list
        page = freelist_get( kcm );
        if( page == NULL )
        {
            spinlock_unlock( &kcm->lock );   // do not return with the lock held
            return NULL;
        }

        // insert page in active list
        list_add_first( &kcm->active_root , &page->list );
        kcm->active_pages_nr ++;
        page->active = 1;
    }
    else                                       // get first page from active list
    {
        kcm_dmsg("\n[INFO] %s : enters for type %s with an active page\n",
                 __FUNCTION__ , kmem_type_str( kcm->type ) );

        // get page pointer from active list
        page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );
    }

    // get a block from selected active page
    // cannot fail, as an active page cannot be full...
    ptr = kcm_get_block( kcm , page );

    // release lock
    spinlock_unlock( &kcm->lock );

    kcm_dmsg("\n[INFO] %s : allocated one block of type %s / ptr = %x\n",
             __FUNCTION__ , kmem_type_str( kcm->type ) , (uint32_t)ptr );

    return ptr;
…
    kcm = page->kcm;

    // get lock
    spinlock_lock( &kcm->lock );

    // release block
    kcm_put_block( kcm , ptr );

    // release lock
    spinlock_unlock( &kcm->lock );
}
…
{
    printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",
           kmem_type_str( kcm->type ) ,
           kcm->free_pages_nr ,
           kcm->busy_pages_nr ,
           kcm->active_pages_nr );
}
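To make the bitmap mechanism above concrete, here is a minimal user-space sketch of the technique implemented by kcm_get_block() and kcm_put_block(): one bit per block inside a page, a first-set-bit scan for allocation, and a refcount that tells when the page is full. All demo_* names, the 4 KB page, and the 64-byte block size are hypothetical stand-ins, and the kernel's free/active/busy page lists and spinlock are omitted.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE  4096
#define BLOCK_SIZE 64
#define BLOCKS_NR  (PAGE_SIZE / BLOCK_SIZE)   /* 64 blocks -> bitmap fits one uint64_t */

typedef struct demo_page_s
{
    uint64_t bitmap;            /* one bit per block : 1 == free         */
    uint32_t refcount;          /* number of currently allocated blocks  */
    char     base[PAGE_SIZE];   /* storage carved into BLOCKS_NR blocks  */
} demo_page_t;

/* return index of first free block, or -1 if the page is full,
 * mirroring the bitmap_ffs() call in kcm_get_block()            */
static int32_t demo_ffs( uint64_t bitmap )
{
    for( int32_t i = 0 ; i < BLOCKS_NR ; i++ )
        if( bitmap & (1ULL << i) ) return i;
    return -1;
}

/* allocate one block : clear its bit and increment the refcount */
static void * demo_get_block( demo_page_t * page )
{
    int32_t index = demo_ffs( page->bitmap );
    if( index == -1 ) return NULL;            /* page full : would be "busy" */
    page->bitmap &= ~(1ULL << index);
    page->refcount ++;
    return page->base + index * BLOCK_SIZE;
}

/* release one block : set its bit again and decrement the refcount */
static void demo_put_block( demo_page_t * page , void * ptr )
{
    uint32_t index = (uint32_t)(((char *)ptr - page->base) / BLOCK_SIZE);
    page->bitmap |= (1ULL << index);
    page->refcount --;
}

int main( void )
{
    demo_page_t page = { .bitmap = ~0ULL , .refcount = 0 };   /* all blocks free */

    void * a = demo_get_block( &page );
    void * b = demo_get_block( &page );
    printf("a = %p / b = %p / refcount = %u\n", a, b, (unsigned)page.refcount);

    demo_put_block( &page , a );
    printf("after put : refcount = %u\n", (unsigned)page.refcount);
    return 0;
}

Keeping full pages on a separate busy list, as the kernel code does, means kcm_alloc() never scans a page that has no free block; a page migrates back to the active list as soon as one of its blocks is released.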
trunk/kernel/mm/khm.c (r18 → r20)
void khm_init( khm_t * khm )
{
    // check config parameters
    assert( ((CONFIG_PPM_PAGE_SHIFT + CONFIG_PPM_HEAP_ORDER) < 32) , __FUNCTION__ ,
            "CONFIG_PPM_HEAP_ORDER too large" );

    // initialize lock
    spinlock_init( &khm->lock );

    // compute kernel heap size
    intptr_t heap_size = (1 << CONFIG_PPM_HEAP_ORDER) << CONFIG_PPM_PAGE_SHIFT;

    // get kernel heap base from PPM
    page_t * page      = ppm_alloc_pages( CONFIG_PPM_HEAP_ORDER );
    void   * heap_base = ppm_page2base( page );

    // initialize first block (complete heap)
    khm_block_t * block = (khm_block_t *)heap_base;
    block->size = heap_size;
    block->busy = 0;

    // initialize KHM fields
    khm->base = (intptr_t)heap_base;
    khm->size = heap_size;
…
    uint32_t effective_size;

    // compute actual block size
    effective_size = size + sizeof(khm_block_t);
    effective_size = ARROUND_UP( effective_size , CONFIG_CACHE_LINE_SIZE );

    // get lock protecting heap
    spinlock_lock( &khm->lock );

    // define a starting block to scan existing blocks
    if( ((khm_block_t *)khm->next)->size < effective_size ) current = (khm_block_t *)khm->base;
    else                                                    current = (khm_block_t *)khm->next;

    // scan all existing blocks to find a free block large enough
    while( current->busy || (current->size < effective_size) )
    {
        // get next block pointer
        current = (khm_block_t *)((char *)current + current->size);
…
    }

    // split the current block if it is too large
    if( (current->size - effective_size) >= CONFIG_CACHE_LINE_SIZE )
    {
        // update new free block features
        next = (khm_block_t *)((char *)current + effective_size);
        next->size = current->size - effective_size;
        next->busy = 0;

        // register new free block
        khm->next = (intptr_t)next;

        // update allocated block features
        current->size = effective_size;
        current->busy = 1;
    }
    else
    {
        // change block state
        current->busy = 1;
    }

    // release lock protecting heap
    spinlock_unlock( &khm->lock );
…
    current = (khm_block_t *)((char *)ptr - sizeof(khm_block_t));

    // get lock protecting heap
    spinlock_lock( &khm->lock );

    // release block
    current->busy = 0;

    // try to merge released block with the next
    while( 1 )
    {
        next = (khm_block_t *)((char *)current + current->size);
        if( ((intptr_t)next >= (khm->base + khm->size)) || (next->busy == 1) ) break;
        current->size += next->size;
…
    }
    if( (intptr_t)current < khm->next ) khm->next = (intptr_t)current;

    // release lock protecting heap
    spinlock_unlock( &khm->lock );
}
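The following is a minimal user-space sketch of the first-fit scheme that khm_alloc() and khm_free() implement: every block starts with a small header recording its total size and a busy flag, allocation splits an oversized free block in two, and release merges the block with any free successors. The demo_* names, the 4 KB heap, and the 16-byte alignment are hypothetical stand-ins; the khm->next hint that lets the kernel resume scanning near the last known free block is omitted for brevity.

#include <stdint.h>
#include <stdio.h>

#define HEAP_SIZE 4096
#define ALIGN     16          /* stands in for CONFIG_CACHE_LINE_SIZE */

typedef struct demo_block_s
{
    uint32_t size;            /* total block size, header included */
    uint32_t busy;            /* 0 == free / 1 == allocated        */
} demo_block_t;

static _Alignas(ALIGN) char heap[HEAP_SIZE];

/* one free block initially covers the whole heap */
static void demo_heap_init( void )
{
    demo_block_t * first = (demo_block_t *)heap;
    first->size = HEAP_SIZE;
    first->busy = 0;
}

static void * demo_alloc( uint32_t size )
{
    /* effective size includes the header, rounded up to the alignment */
    uint32_t effective = (size + (uint32_t)sizeof(demo_block_t) + ALIGN - 1)
                         & ~(uint32_t)(ALIGN - 1);

    /* first-fit scan over all blocks, starting from the heap base */
    demo_block_t * current = (demo_block_t *)heap;
    while( (char *)current < heap + HEAP_SIZE )
    {
        if( (current->busy == 0) && (current->size >= effective) )
        {
            /* split the current block if it is too large */
            if( (current->size - effective) >= ALIGN )
            {
                demo_block_t * next = (demo_block_t *)((char *)current + effective);
                next->size    = current->size - effective;
                next->busy    = 0;
                current->size = effective;
            }
            current->busy = 1;
            return (char *)current + sizeof(demo_block_t);
        }
        current = (demo_block_t *)((char *)current + current->size);   /* next block */
    }
    return NULL;   /* no free block large enough */
}

static void demo_free( void * ptr )
{
    demo_block_t * current = (demo_block_t *)((char *)ptr - sizeof(demo_block_t));
    current->busy = 0;

    /* merge the released block with any free successors */
    while( 1 )
    {
        demo_block_t * next = (demo_block_t *)((char *)current + current->size);
        if( ((char *)next >= heap + HEAP_SIZE) || (next->busy == 1) ) break;
        current->size += next->size;
    }
}

int main( void )
{
    demo_heap_init();
    void * a = demo_alloc( 100 );
    void * b = demo_alloc( 200 );
    printf("a = %p / b = %p\n", a, b);

    demo_free( b );                  /* merges with the trailing free block */
    demo_free( a );                  /* then absorbs b's block and the tail */

    void * c = demo_alloc( 3900 );   /* needs the space recovered by merging */
    printf("c = %p\n", c);
    return 0;
}

Because a header records only the block size and no back pointer, merging works forward only: a released block absorbs free successors, and the kernel then lowers khm->next so the next scan starts at the lowest known free block.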