- Timestamp: Jun 3, 2017, 4:42:49 PM (8 years ago)
- Location: trunk/kernel/mm
- Files: 15 edited
trunk/kernel/mm/kcm.c
(r14 → r18) kcm.c implements the per-cluster, per-type Kernel Cache Manager (KCM) access functions. The context shown by the diff covers block allocation inside a KCM page (bitmap_ffs() finds the first free block, bitmap_clear() reserves it), block release (bitmap_set() plus a refcount decrement, turning a busy page back into an active one), freelist_populate() (which gets one page from the PPM through kmem_alloc() and adds it to the free list), kcm_init() / kcm_destroy(), kcm_alloc() / kcm_free() (which take the KCM spinlock and pick or recycle an active page), and kcm_print(). The changes themselves are whitespace-only: trailing spaces removed, no functional change.
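The allocation path above is a small exercise in bitmap arithmetic: find the first free bit, clear it, and derive the block address from the page base. A minimal standalone sketch of that logic, using a plain uint64_t bitmap and simplified stand-in types instead of the kernel's bitmap_t helpers and kcm_page_t descriptor:

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified stand-in for the kernel's kcm_page_t (illustrative only). */
    typedef struct {
        uint64_t  bitmap;     /* bit i set <=> block i is free              */
        uint8_t * base;       /* first byte usable for blocks in this page  */
        uint32_t  refcount;   /* number of allocated blocks                 */
    } demo_kcm_page_t;

    /* Allocate one block: first free bit -> clear it -> base + i * size.   */
    static void * demo_get_block( demo_kcm_page_t * page, uint32_t block_size )
    {
        if( page->bitmap == 0 ) return NULL;           /* page is full       */
        int index = __builtin_ctzll( page->bitmap );   /* first set bit      */
        page->bitmap &= ~(1ULL << index);              /* mark it allocated  */
        page->refcount++;
        return page->base + (size_t)index * block_size;
    }

    /* Release one block: recover its index from the pointer offset,
     * set the bit back and decrement refcount (mirrors kcm_put_block).     */
    static void demo_put_block( demo_kcm_page_t * page, void * ptr, uint32_t block_size )
    {
        uint32_t index = (uint32_t)(((uint8_t *)ptr - page->base) / block_size);
        page->bitmap |= (1ULL << index);
        page->refcount--;
    }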
trunk/kernel/mm/kcm.h
(r7 → r18) kcm.h declares the kcm_t allocator descriptor (spinlock, free / busy / active page lists and counters, block_size, blocks_nr and the KCM type) and the kcm_page_t page descriptor (bitmap, base, refcount, busy and active flags). As the header explains, each allocated object must fit in a single page, and the KCM allocators themselves are not statically allocated in the cluster manager: they are created on demand, using the embedded KCM allocator defined in the cluster manager to allocate the other ones. The declared services are kcm_init(), kcm_destroy() and the single-object allocation function. The changes are whitespace-only (trailing spaces removed); no functional change.
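A hedged usage sketch of that API, based on the prototypes visible in this changeset (kcm_init() plus the allocation and release functions defined in kcm.c); my_object_t and MY_KMEM_TYPE are placeholders, not kernel names:

    /* One KCM per object type per cluster: initialise it once, then hand
     * out fixed-size objects smaller than one page (illustrative sketch). */
    static void example_kcm_usage( kcm_t * my_kcm )
    {
        kcm_init( my_kcm , MY_KMEM_TYPE );       /* block size derived from type */

        my_object_t * obj = kcm_alloc( my_kcm ); /* NULL on failure              */
        if( obj != NULL )
        {
            /* ... use the object ... */
            kcm_free( obj );   /* the KCM is recovered from the page descriptor  */
        }
    }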
trunk/kernel/mm/khm.c
(r14 → r18) khm.c implements the Kernel Heap Manager. khm_init() sizes the heap as (1 << CONFIG_PPM_HEAP_ORDER) pages obtained from the PPM and records its base; khm_alloc() takes the heap spinlock and scans the chain of block headers, skipping busy or too-small blocks until a large enough free block is found, and reports an allocation failure if the end of the heap is reached; khm_free() recovers the block header from the user pointer, clears its busy flag, merges it with any free blocks that follow, and updates the "next" scan hint. The changes are whitespace-only (trailing spaces removed); no functional change.
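The allocation scan is a classic first-fit walk over headers laid end to end in the heap: each header records the block size, so the next header is simply the current address plus that size. A self-contained sketch of that walk (simplified header type; the real code also maintains a "next" hint and holds the heap spinlock, omitted here):

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified block header: the real khm_block_t packs busy:1 / size:31. */
    typedef struct {
        uint32_t busy;   /* 0 = free                                          */
        uint32_t size;   /* total block size in bytes, header included        */
    } demo_khm_block_t;

    /* First-fit: walk headers from the heap base until a free block large
     * enough for effective_size is found, or the heap end is passed.         */
    static demo_khm_block_t * demo_first_fit( uintptr_t heap_base,
                                              uintptr_t heap_size,
                                              uint32_t  effective_size )
    {
        demo_khm_block_t * cur = (demo_khm_block_t *)heap_base;

        while( cur->busy || (cur->size < effective_size) )
        {
            cur = (demo_khm_block_t *)((char *)cur + cur->size);   /* next header */
            if( (uintptr_t)cur >= heap_base + heap_size ) return NULL;  /* full   */
        }
        return cur;   /* caller marks it busy and may split it */
    }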
trunk/kernel/mm/khm.h
(r14 → r18) khm.h defines the per-cluster Kernel Heap Manager used for variable-size allocations of objects that are not replicated enough to justify a dedicated KCM allocator. Every allocated block is preceded by a khm_block_t descriptor (busy:1, size:31) stored at the beginning of the block, so the pointer returned to the caller is the block base plus the descriptor size, and the actual block size is the requested size plus the descriptor size, rounded to a cache line. The changes are whitespace-only (trailing spaces removed); no functional change.
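A small worked sketch of those two rules: rounding the requested size, and recovering the descriptor from the user pointer on free. The 64-byte cache line and the helper names are assumptions for the example, not values taken from the changeset:

    #include <stdint.h>
    #include <stddef.h>

    #define DEMO_CACHE_LINE  64u              /* assumed cache line size      */

    typedef struct {
        uint32_t busy:1;
        uint32_t size:31;
    } demo_khm_block_t;

    /* requested size -> effective block size (header included, line aligned) */
    static uint32_t demo_effective_size( uint32_t requested )
    {
        uint32_t raw = requested + (uint32_t)sizeof(demo_khm_block_t);
        return (raw + DEMO_CACHE_LINE - 1) & ~(DEMO_CACHE_LINE - 1);
    }

    /* user pointer -> block descriptor (mirrors the arithmetic in khm_free)  */
    static demo_khm_block_t * demo_ptr_to_block( void * ptr )
    {
        return (demo_khm_block_t *)((char *)ptr - sizeof(demo_khm_block_t));
    }

For instance, a 100-byte request becomes 104 bytes with its 4-byte header, rounded up to 128 bytes.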
trunk/kernel/mm/kmem.c
(r14 → r18) kmem.c implements the generic kernel memory allocator. A debug routine checks that every registered KCM in the cluster table has the expected type; kmem_type_size() and kmem_type_str() map a KMEM type to an object size and a printable name (KMEM_VFS_CTX, KMEM_VFS_INODE, KMEM_VFS_DENTRY, KMEM_VFS_FILE, KMEM_SEM, ...); kmem_create_kcm() creates and initialises the KCM allocator for a type and registers it in the cluster table; kmem_alloc() checks the request type and dispatches it, KMEM_PAGE requests going to the PPM (with an optional page_zero() when AF_ZERO is set) and object types being served by the per-type KCM allocator, created lazily under the cluster kcm_lock on first use; the free path likewise switches on the request type. The changes are whitespace-only (trailing spaces removed); no functional change.
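The dispatch is easier to read out of the diff noise. A condensed sketch of the kmem_alloc() control flow seen in the context, with debug messages and error handling trimmed; it reuses the field and function names shown by the changeset and is not a drop-in copy of the real function:

    /* Condensed sketch of the kmem_alloc() dispatch (illustrative). */
    void * sketch_kmem_alloc( kmem_req_t * req )
    {
        cluster_t * cluster = LOCAL_CLUSTER;

        if( req->type == KMEM_PAGE )                       /* PPM allocator   */
        {
            void * ptr = (void *)ppm_alloc_pages( req->size );
            if( ptr && (req->flags & AF_ZERO) ) page_zero( (page_t *)ptr );
            return ptr;
        }

        /* object types: create the per-type KCM allocator on first use */
        if( cluster->kcm_tbl[req->type] == NULL )
        {
            spinlock_lock( &cluster->kcm_lock );
            kmem_create_kcm( req->type );
            spinlock_unlock( &cluster->kcm_lock );
        }

        return kcm_alloc( cluster->kcm_tbl[req->type] );   /* NULL on failure */
    }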
trunk/kernel/mm/kmem.h
(r7 → r18) kmem.h defines the generic allocation flags that can be attached to a kernel memory request (AF_NONE, AF_KERNEL for kernel use, AF_ZERO for zero-filled memory) and the kmem_req_t request descriptor (type, size, flags, and a ptr field used only by the free path), plus the kmem_print_kcm_table() prototype. Besides trailing-whitespace removal, the revision fixes one comment typo: "This function s displaythe content of the KCM pointers Table" becomes "This function displays the content of the KCM pointers Table".
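A short usage sketch of the request descriptor, built from the fields and flags above and the call pattern visible in kcm.c and mapper.c (req.size = 0 for a single page). The release step illustrates how the ptr field is meant to be used and is not a verbatim excerpt of the free API:

    /* Allocate one zeroed physical page through the generic allocator,
     * then release it with the same request descriptor (illustrative).   */
    static void example_kmem_usage( void )
    {
        kmem_req_t req;
        page_t   * page;

        req.type  = KMEM_PAGE;
        req.size  = 0;                       /* a single page              */
        req.flags = AF_KERNEL | AF_ZERO;     /* kernel use, zero the page  */

        page = kmem_alloc( &req );
        if( page == NULL ) return;           /* allocation failure         */

        /* ... use the page ... */

        req.type = KMEM_PAGE;
        req.ptr  = page;                     /* ptr is only used by free   */
        kmem_free( &req );
    }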
trunk/kernel/mm/mapper.c
(r14 → r18) mapper.c implements the file cache operations. mapper_destroy() walks the radix tree and releases every registered page to the PPM before freeing the mapper itself. mapper_get_page() looks the page up with grdxt_lookup(); on a miss it takes the mapper lock in write mode, allocates a page through kmem (KMEM_PAGE, single page), inserts it in the radix tree and calls mapper_updt_page() to load it from the file system, while concurrent threads that observe the PG_INLOAD flag simply deschedule until the load completes. mapper_updt_page() and mapper_sync_page() perform the actual I/O between the mapper and the file system, handling the mapper lock, the page lock and the dirty flag. A static helper moves one fragment between a user buffer and the mapper: as its implementation note says, a fragment may fit in a single mapper page or spread over two successive mapper pages, so the helper derives the first and second page indexes, offsets and sizes from the fragment's file offset and size, and copies the data with hal_remote_memcpy() through extended pointers; mapper_move_fragments() applies it to every fragment, making a local copy of the fragment array when the client is remote. The changes are whitespace-only (trailing spaces removed); no functional change.
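The fragment arithmetic is worth seeing in isolation: the offsets of the first and last byte of the fragment give the first and second mapper page indexes, and the fragment spans two pages exactly when those indexes differ. A standalone sketch of that computation with simplified names; the changeset's own code masks the offset with a page-size expression that is hard to read in this diff, so the sketch uses the conventional page-offset mask:

    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12u
    #define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

    typedef struct {
        uint32_t file_offset;    /* offset of first byte in file */
        uint32_t size;           /* number of bytes in fragment  */
    } demo_fragment_t;

    /* Split a fragment over at most two mapper pages; returns 1 or 2. */
    static int demo_split_fragment( const demo_fragment_t * frag,
                                    uint32_t * first_index, uint32_t * first_offset,
                                    uint32_t * first_size,  uint32_t * second_size )
    {
        uint32_t min = frag->file_offset;                     /* first byte  */
        uint32_t max = frag->file_offset + frag->size - 1;    /* last byte   */

        *first_index  = min >> DEMO_PAGE_SHIFT;
        *first_offset = min &  (DEMO_PAGE_SIZE - 1);

        if( (max >> DEMO_PAGE_SHIFT) == *first_index )        /* one page    */
        {
            *first_size  = frag->size;
            *second_size = 0;
            return 1;
        }
        *first_size  = DEMO_PAGE_SIZE - *first_offset;        /* to page end */
        *second_size = frag->size - *first_size;              /* spill over  */
        return 2;
    }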
trunk/kernel/mm/mapper.h
(r1 → r18) mapper.h documents the kernel file cache. Each file or directory has one mapper, implemented as a three-level radix tree stored in the same cluster as the inode: the retrieval key is the page index in the file, the ix1/ix2/ix3 widths are configuration parameters, and the leaves are pointers to dynamically allocated physical page descriptors. A "fragment" is a set of contiguous bytes of the file that fits in one physical page of the user buffer but may spread over two successive mapper pages. The interface comprises mapper_create() and mapper_destroy() (the latter synchronises dirty pages and releases the descriptor and the radix tree), mapper_move_fragments() (moves all fragments covering a distributed user buffer, with the mapper lock taken by the caller in read or write mode depending on the direction, setting the dirty bit on written pages and leaving the file offset untouched; its implementation note states that each fragment costs one hal_remote_memcpy() when it fits in a single mapper page), mapper_get_page() (returns the page descriptor for a page index and handles misses under the write lock), and mapper_updt_page() / mapper_sync_page() (I/O between the mapper and the file system). Besides trailing-whitespace removal, the revision fixes several comment typos: "The mapper implement" becomes "The mapper implements", "initi talizes" becomes "initializes", "radix t hree themselve" becomes "radix tree themselves", and "This function search a physical page descriptor" becomes "This function searches a physical page descriptor".
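A hedged sketch of the simplest read path these comments suggest: look a page up by its index in the file, then copy out of it. It is illustrative only; real callers go through mapper_move_fragments(), the exact mapper_get_page() prototype is not visible in this diff, and the page-to-base conversion reuses ppm_page2base() as in ppm.c:

    /* Read 'size' bytes at page 'index', byte 'offset', from a file mapper. */
    static error_t sketch_mapper_read( mapper_t * mapper,
                                       uint32_t   index,
                                       uint32_t   offset,
                                       uint8_t  * dst,
                                       uint32_t   size )
    {
        page_t * page = mapper_get_page( mapper , index );  /* miss handled inside */
        if( page == NULL ) return EIO;

        uint8_t * base = (uint8_t *)ppm_page2base( page );  /* page frame base     */
        memcpy( dst , base + offset , size );
        return 0;
    }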
trunk/kernel/mm/page.c
(r7 → r18) page.c implements the physical page descriptor operations: page_do_dirty() sets the PG_DIRTY flag and registers the page in the PPM dirty list under the dirty-list spinlock; sync_all_pages() drains that list, locking each dirty page and writing it back through its mapper; the page lock and unlock primitives rely on a per-page spinlock and a wait list, waking the first waiting thread when the page is released; page_copy(), page_zero() and page_print() provide copy, clear and debug display helpers. The changes are whitespace-only (trailing spaces removed); no functional change.
trunk/kernel/mm/page.h
(r14 → r18) page.h defines the physical page descriptor and its flags (PG_RESERVED, PG_FREE, PG_INLOAD, PG_IO_ERR, PG_BUFFER, ...), the descriptor fields (associated mapper, page index in that mapper, a list entry shared by the dirty and free lists, the root of the list of waiting threads), and the API: page_set_flag(), page_clear_flag() and page_is_flag(), sync_all_pages(), page_undo_dirty(), page_zero(), and the blocking page_lock(), which deschedules the caller until the PG_LOCKED flag can be set. The changes are whitespace-only (trailing spaces removed); no functional change.
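A short usage sketch of the flag helpers declared here, mirroring the PG_INLOAD pattern used in mapper.c: the loader thread marks the page while the load is in flight, and other threads deschedule until the flag is cleared. The two sides are shown as separate functions:

    /* Loader side: announce the on-going load, do the I/O, clear the flag. */
    static void sketch_loader_side( page_t * page )
    {
        page_set_flag( page , PG_INLOAD );
        /* ... issue the I/O through the mapper ... */
        page_clear_flag( page , PG_INLOAD );
    }

    /* Reader side: wait until the load completes, as mapper_get_page does. */
    static void sketch_reader_side( page_t * page )
    {
        while( page_is_flag( page , PG_INLOAD ) )
        {
            sched_yield();                       /* deschedule and retry    */
        }
        /* page content is now valid */
    }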
trunk/kernel/mm/ppm.c
(r14 → r18) ppm.c implements the buddy allocator for physical pages. ppm_init() places the page descriptor array just after the reserved pages (kernel code plus the array itself), marks those pages PG_RESERVED and releases all the others to the free lists. ppm_alloc_pages() takes the free-list lock, finds the smallest free block whose order is large enough, and splits it repeatedly, putting each remaining half back in the free list of the lower order before returning the block. The release path looks for the buddy of the freed block at each order (buddy_index = current_index ^ (1 << current_order)) and merges as long as the buddy is free and of the same order, then inserts the merged block in the proper free list; ppm_free_pages() simply wraps that logic with the free-list lock. ppm_print() and ppm_assert_order() are debug helpers. The changes are whitespace-only (trailing spaces removed); no functional change.
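The buddy computation at the heart of the release path is a single XOR on the page index. A standalone sketch of the merge loop, assuming a flat page descriptor array as in the changeset; the free-list bookkeeping is reduced to a boolean flag:

    #include <stdint.h>
    #include <stdbool.h>

    #define DEMO_MAX_ORDER 16u

    typedef struct {
        bool     free;     /* stand-in for the PG_FREE flag      */
        uint32_t order;    /* block order when the page is free  */
    } demo_page_t;

    /* Merge the freed block with its buddy while the buddy is free and of
     * the same order; return the index and order of the merged block.     */
    static void demo_buddy_merge( demo_page_t * pages_tbl,
                                  uint32_t      index,
                                  uint32_t    * out_index,
                                  uint32_t    * out_order )
    {
        uint32_t order = pages_tbl[index].order;

        for( ; order < DEMO_MAX_ORDER ; order++ )
        {
            uint32_t      buddy_index = index ^ (1u << order);  /* flip one bit */
            demo_page_t * buddy       = &pages_tbl[buddy_index];

            if( !buddy->free || (buddy->order != order) ) break;

            buddy->free = false;     /* buddy leaves its free list              */
            index &= buddy_index;    /* merged block starts at the lower index  */
        }
        *out_index = index;
        *out_order = order;
    }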
trunk/kernel/mm/ppm.h
(r7 → r18) ppm.h is the per-cluster Physical Pages Manager interface. The kcode and kdata segments are mapped in the first "offset" pages and the page descriptor array sits just after that zone; the allocator follows the buddy algorithm, an allocated block being a power-of-two number of 4 Kbyte pages. ppm_init() takes the total number of pages and the number of pages already allocated (the physical memory base is zero in every cluster, and its size is not constrained to fit in 4 Gbytes); ppm_alloc_pages() and the matching release function are the low-level primitives, and their comments recommend going through the generic allocator of kmem.h in normal use. Besides trailing-whitespace removal, the revision fixes the "recomm anded" typo ("recommended") in the two comments that make that recommendation.
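Since ppm_alloc_pages() deals in power-of-two blocks, a caller that needs an arbitrary number of pages first rounds the request up to the next power of two. A tiny helper sketch for that rounding; the helper name is illustrative and is not part of the interface:

    #include <stdint.h>

    /* Smallest order such that (1 << order) >= npages, for npages >= 1. */
    static uint32_t demo_order_for_pages( uint32_t npages )
    {
        uint32_t order = 0;
        while( (1u << order) < npages ) order++;
        return order;
    }

    /* Examples: demo_order_for_pages(1) == 0, (3) == 2, (8) == 3. */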
trunk/kernel/mm/vmm.h
(r1 → r18) vmm.h is the Virtual Memory Manager interface. Its comments describe the STACK allocator (fixed-size slots whose index can be computed from the slot base address and back, a stack_bitmap that completely encodes the zone state, and at most 32 slots in this implementation) and the MMAP allocator (used only in the reference cluster: each allocated vseg occupies a power-of-two number of pages aligned on a page boundary, the requested page count being rounded up if required; first_free_vpn fully encodes the zone state and is never decremented, and released vsegs are parked in zombi_lists indexed by log2 of their page count, checked first on every new request). The vmm_t descriptor registers the vsegs of a process in a list and a radix tree, owns the STACK and MMAP allocators, and holds the local copy of the generic page table; the mmap_attr_s structure carries the mmap request parameters (requested address, length, access modes, ...). The declared services cover: destroying a VMM (removing all vsegs and releasing the local page table), checking a virtual region for overlap with existing vsegs, vmm_create_vseg() (which checks collisions, lets the STACK and MMAP allocators pick the base address for those types and, to respect on-demand paging, neither touches the page table nor allocates physical memory), vseg removal, eager mapping of kernel vsegs (KCODE, KDATA, KDEV) in the page table of process_zero, unmapping a vseg and removing a region, looking up the vseg containing a virtual address, vmm_handle_page_fault() (which sends an RPC_VMM_GET_PTE to the reference cluster when the fault is taken elsewhere), PTE retrieval on the reference cluster (which may call RPC_PMEM_GET_PAGES to get a missing physical page from the target cluster), virtual-to-physical translation for DMA-capable devices and software L2/L3 cache coherence, plus sys_sbrk() / vmm_sbrk(), vmm_madvise_migrate(), vmm_broadcast_inval() and vmm_migrate_shared_page_seq(). Besides trailing-whitespace removal, the revision fixes a series of comment typos: "s ould" becomes "should", "req ired" becomes "required", "mo fify" becomes "modify", "asociated" becomes "associated", and several verbs move to the third person ("scan", "remove", "search", "unmap" become "scans", "removes", "searches", "unmaps").
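The MMAP policy described above fits in a few lines: round the request up to a power-of-two page count, reuse a released (zombi) vseg of that order if one exists, otherwise carve new virtual space at first_free_vpn, which only ever grows. A simplified standalone sketch, with each zombi list reduced to a single cached entry per order; all names are placeholders, not the kernel's own:

    #include <stdint.h>
    #include <stdbool.h>

    #define DEMO_MAX_ORDER 20u

    typedef struct {
        uint32_t first_free_vpn;                 /* never decremented              */
        bool     zombi_valid[DEMO_MAX_ORDER];    /* one cached released vseg/order */
        uint32_t zombi_vpn[DEMO_MAX_ORDER];      /* (the real code keeps lists)    */
    } demo_mmap_allocator_t;

    /* Returns the vpn_base of the allocated region. */
    static uint32_t demo_mmap_alloc( demo_mmap_allocator_t * a, uint32_t npages )
    {
        uint32_t order = 0;
        while( (1u << order) < npages ) order++;     /* round up to 2^order pages */

        if( a->zombi_valid[order] )                  /* reuse a released vseg     */
        {
            a->zombi_valid[order] = false;
            return a->zombi_vpn[order];
        }
        uint32_t vpn_base = a->first_free_vpn;       /* fresh virtual space       */
        a->first_free_vpn += (1u << order);
        return vpn_base;
    }

    /* Releasing a region just parks it for later reuse. */
    static void demo_mmap_release( demo_mmap_allocator_t * a,
                                   uint32_t vpn_base, uint32_t npages )
    {
        uint32_t order = 0;
        while( (1u << order) < npages ) order++;
        a->zombi_valid[order] = true;
        a->zombi_vpn[order]   = vpn_base;
    }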
trunk/kernel/mm/vseg.c
(r1 → r18) vseg.c initialises vseg descriptors: vseg_init() records the base, size, vpn_base, vpn_size and type, then derives the flags from the type (for instance CODE vsegs are USER | EXEC | CACHE | PRIVATE, DATA vsegs are USER | WRITE | CACHE | DISTRIB, HEAP, REMOTE, ANON and FILE vsegs get their own combinations, KCODE and KDATA are the kernel PRIVATE variants, and any other type triggers a kernel panic). Besides trailing-whitespace removal, the revision fixes one comment typo: the display strings "must be consistent with enum in vseg.h" (previously "consist ant").
trunk/kernel/mm/vseg.h
(r16 → r18) vseg.h declares the virtual segment descriptor: the generic (hardware-independent) flag masks, the vseg_t fields (min and max virtual addresses, vpn_base and vpn_size, flags, an extended pointer on the associated mapper, ...), and the vseg_init(), vseg_attach() and vseg_detach() prototypes; attach and detach add or remove a vseg from the set controlled by a VMM and update its vmm field, with the lock protecting the VMM vseg list taken by the caller, and detach releases no memory. Besides trailing-whitespace removal, the revision fixes several comment typos: "independ ant" becomes "independent", "numb reof" becomes "number of", and the attach/detach comments now read "adds", "updates" and "controlled".
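The descriptor couples a byte range (base and exclusive max address) with a page range (vpn_base, vpn_size). A small sketch of how the two views relate, using the conventional page-shift conversion; the constant and helper names are illustrative:

    #include <stdint.h>

    #define DEMO_PAGE_SHIFT 12u

    typedef uint32_t  demo_vpn_t;
    typedef uintptr_t demo_addr_t;

    /* byte range -> page range (base assumed page aligned, as for vsegs) */
    static void demo_byte_to_page_range( demo_addr_t  base,
                                         demo_addr_t  size,
                                         demo_vpn_t * vpn_base,
                                         demo_vpn_t * vpn_size )
    {
        *vpn_base = (demo_vpn_t)(base >> DEMO_PAGE_SHIFT);
        *vpn_size = (demo_vpn_t)((size + (1u << DEMO_PAGE_SHIFT) - 1) >> DEMO_PAGE_SHIFT);
    }

    /* page range -> exclusive max byte address, matching the vseg_t "max" field */
    static demo_addr_t demo_max_address( demo_vpn_t vpn_base, demo_vpn_t vpn_size )
    {
        return ((demo_addr_t)(vpn_base + vpn_size)) << DEMO_PAGE_SHIFT;
    }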