Changeset 567 for trunk/kernel/mm
- Timestamp: Oct 5, 2018, 12:01:52 AM
- Location: trunk/kernel/mm
- Files: 15 edited
trunk/kernel/mm/kcm.c
Changes from r551 to r567:

- Header comment: "kcm.c - Per cluster & per type Kernel Cache Manager access functions" becomes "kcm.c - Per cluster Kernel Cache Manager implementation."
- Added #include <busylock.h>.
- kcm_init(): spinlock_init( &kcm->lock ) replaced by busylock_init( &kcm->lock , LOCK_KCM_STATE ).
- In the KCM destruction path (release of all free pages), spinlock_lock() / spinlock_unlock() on &kcm->lock replaced by busylock_acquire() / busylock_release().
- kcm_alloc(): the spinlock_lock_busy() / spinlock_unlock_busy() pair, with its explicit irq_state variable, replaced by busylock_acquire() / busylock_release(); both the NULL-page error path and the normal return path now release the busylock.
- kcm_free(): spinlock_lock() / spinlock_unlock() replaced by busylock_acquire() / busylock_release() around the block release.
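The recurring change in this file, and in most of the changeset, is the replacement of the spinlock API (including the spinlock_lock_busy() variant that saved the IRQ state in the caller) by the busylock_acquire() / busylock_release() pair, with the lock type given at initialization. As a point of reference, here is a minimal user-space model of such a busy-waiting lock written with C11 atomics; it only illustrates the calling pattern, not the ALMOS-MKH implementation (the real busylock also disables IRQs and uses the type, such as LOCK_KCM_STATE, for debug and statistics).

    #include <stdatomic.h>
    #include <stdint.h>

    typedef struct busylock_s
    {
        atomic_uint taken;   // 0 = free / 1 = taken
        uint32_t    type;    // lock type (e.g. LOCK_KCM_STATE), kept for debug
    } busylock_t;

    // initialize a free lock of a given type
    static void busylock_init( busylock_t * lock , uint32_t type )
    {
        atomic_init( &lock->taken , 0 );
        lock->type = type;
    }

    // busy-wait until the lock is obtained
    static void busylock_acquire( busylock_t * lock )
    {
        // spin while the previous value was 1 (lock already taken)
        while( atomic_exchange_explicit( &lock->taken , 1 , memory_order_acquire ) )
            ;
    }

    // release the lock
    static void busylock_release( busylock_t * lock )
    {
        atomic_store_explicit( &lock->taken , 0 , memory_order_release );
    }

With this shape, the kcm_alloc() critical section reduces to busylock_acquire( &kcm->lock ) ... busylock_release( &kcm->lock ), with no irq_state variable threaded through the caller.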
trunk/kernel/mm/kcm.h
Changes from r457 to r567:

- Header comment: "kcm.h - Per-cluster Kernel Cache Manager Interface" becomes "kcm.h - Per-cluster Kernel Cache Manager definition."
- #include <spinlock.h> replaced by #include <busylock.h>.
- In the kcm_s structure, the spinlock_t lock field ("protect exclusive access to allocator") becomes busylock_t lock ("protect KCM allocator").
trunk/kernel/mm/khm.c
Changes from r551 to r567:

- Header comment: "khm.c - kernel heap manager implementation." becomes "khm.c - Kernel Heap Manager implementation."
- Author line extended: Alain Greiner (2016) becomes Alain Greiner (2016,2017,2018).
- #include <spinlock.h> replaced by #include <busylock.h>.
- khm_init(): spinlock_init( &khm->lock ) replaced by busylock_init( &khm->lock , LOCK_KHM_STATE ).
- khm_alloc(): the spinlock_lock_busy() / spinlock_unlock_busy() pair (with irq_state) replaced by busylock_acquire() / busylock_release(), both on the "heap full" error path and on the normal return path.
- khm_free(): spinlock_lock() / spinlock_unlock() replaced by busylock_acquire() / busylock_release().
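The khm_alloc() fragments visible above show the allocator's structure: the heap is a sequence of blocks, each starting with a khm_block_t header; the scan stops either on a usable free block or when the scan pointer reaches khm->base + khm->size ("heap full"); and the returned pointer is (char*)current + sizeof(khm_block_t). The following self-contained sketch of that first-fit scan over a flat byte array uses the busy/size header layout from khm.h; names and the (absent) block-splitting policy are simplifications, not the kernel's exact code, and the heap is assumed to have been prepared as one big free block, which is the role khm_init() plays for the kernel heap.

    #include <stdint.h>
    #include <stddef.h>

    typedef struct khm_block_s
    {
        uint32_t busy;   // free block if zero
        uint32_t size;   // total block size in bytes, header included
    } khm_block_t;

    // first-fit allocation in a heap of <heap_size> bytes starting at <heap_base>;
    // returns a pointer to the payload, or NULL when no free block is large enough
    static void * khm_alloc_sketch( uint8_t * heap_base ,
                                    uint32_t  heap_size ,
                                    uint32_t  payload_size )
    {
        uint32_t      needed  = payload_size + sizeof(khm_block_t);
        khm_block_t * current = (khm_block_t *)heap_base;

        // scan existing blocks until a free block large enough is found
        while( current->busy || (current->size < needed) )
        {
            current = (khm_block_t *)((uint8_t *)current + current->size);

            // heap full: the scan pointer reached the end of the heap
            if( (uint8_t *)current >= (heap_base + heap_size) ) return NULL;
        }

        // mark the block as allocated (a real allocator would also split it
        // and round sizes for alignment)
        current->busy = 1;

        return (uint8_t *)current + sizeof(khm_block_t);
    }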
trunk/kernel/mm/khm.h
Changes from r457 to r567:

- Header comment: "khm.h - kernel heap manager used for variable size memory allocation." becomes "khm.h - Kernel Heap Manager definition."
- Author list updated: Mohamed Lamine Karaoui (2015) removed, Alain Greiner (2016) becomes Alain Greiner (2016,2017,2018).
- #include <spinlock.h> replaced by #include <busylock.h>.
- The structure comment is rewrapped: the KHM is used to allocate memory objects that are not replicated enough to justify a dedicated KCM allocator.
- In the khm_s structure, the spinlock_t lock field becomes busylock_t lock ("lock protecting KHM allocator").
- In the khm_block_t structure, the bit-fields uint32_t busy:1 ("free block if zero") and uint32_t size:31 ("size coded on 31 bits") become two plain fields uint32_t busy and uint32_t size ("block size").
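The khm_block_t change trades header compactness for simpler accesses: the old layout packed both fields into one 32-bit word, the new one uses two plain aligned words that can be read and written without bit-field extraction. The small program below only illustrates the size consequence; it is not kernel code, and the exact sizes depend on the target ABI.

    #include <stdint.h>
    #include <stdio.h>

    // old layout (r457): both fields packed in one 32-bit word
    typedef struct khm_block_old_s
    {
        uint32_t busy : 1;    // free block if zero
        uint32_t size : 31;   // size coded on 31 bits
    } khm_block_old_t;

    // new layout (r567): two plain words, no bit-field extraction needed
    typedef struct khm_block_new_s
    {
        uint32_t busy;        // free block if zero
        uint32_t size;        // block size
    } khm_block_new_t;

    int main( void )
    {
        // on a typical 32-bit target this prints 4 then 8: the per-block
        // header grows, but each field is now a plain aligned load/store
        printf( "old header : %zu bytes\n" , sizeof(khm_block_old_t) );
        printf( "new header : %zu bytes\n" , sizeof(khm_block_new_t) );
        return 0;
    }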
trunk/kernel/mm/kmem.c
Changes from r551 to r567:

- Author list updated: Mohamed Lamine Karaoui (2015) removed, Alain Greiner (2016) becomes Alain Greiner (2016,2017,2018).
- #include <spinlock.h> and #include <readlock.h> replaced by #include <busylock.h>.
- Comment banner width adjusted above kmem_print_kcm_table().
- Comment typo fixed in the KCM creation function: "register it if the KCM pointers Table" becomes "register it in the KCM pointers Table".
- In kmem_alloc(), when the requested KCM type does not exist yet, the code now takes the lock protecting the local kcm_tbl[] array with busylock_acquire( &cluster->kcm_lock ), calls kmem_create_kcm( type ), releases it with busylock_release(), and on failure prints "cannot create KCM type %d in cluster %x" before returning NULL. The previous version used spinlock_lock_busy() / spinlock_unlock_busy() and returned NULL without a message.
- The subsequent allocation-failure printk is re-indented.
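kmem_alloc() now creates a missing KCM lazily, under the busylock protecting the per-cluster kcm_tbl[] array, and reports a creation failure explicitly. The sketch below shows that lazy-creation-under-lock pattern in isolation, reusing the busylock model given above for kcm.c; the structure, the names and the malloc() stand-in are illustrative only, and the re-test under the lock is an extra guard added in this sketch (the changeset's code tests kcm_tbl[type] once, before taking the lock, and delegates the actual creation to kmem_create_kcm()).

    #include <stdlib.h>
    #include <stddef.h>

    #define KMEM_TYPES_NR  32

    // illustrative per-cluster state: one cache pointer per object type,
    // plus a busylock (as modeled above for kcm.c) protecting the table
    typedef struct cluster_sketch_s
    {
        busylock_t   kcm_lock;                 // protects kcm_tbl[]
        void       * kcm_tbl[KMEM_TYPES_NR];   // one cache per type, created lazily
    } cluster_sketch_t;

    // return the cache for <type>, creating it on first use; NULL on failure
    static void * get_cache( cluster_sketch_t * cluster , unsigned type )
    {
        if( cluster->kcm_tbl[type] == NULL )
        {
            // take the lock protecting the table
            busylock_acquire( &cluster->kcm_lock );

            // re-test under the lock: another thread may have created the
            // cache between the first test and the lock acquisition
            if( cluster->kcm_tbl[type] == NULL )
            {
                // stand-in for kmem_create_kcm(): allocate and register the cache
                cluster->kcm_tbl[type] = malloc( 64 );
            }

            busylock_release( &cluster->kcm_lock );

            if( cluster->kcm_tbl[type] == NULL ) return NULL;   // creation failed
        }
        return cluster->kcm_tbl[type];
    }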
trunk/kernel/mm/kmem.h
Changes from r486 to r567:

- Author list updated: Mohamed Lamine Karaoui (2015) removed, Alain Greiner (2016) becomes Alain Greiner (2016,2017,2018).
trunk/kernel/mm/mapper.c
Changes from r457 to r567:

- Added #include <ppm.h>.
- Mapper creation: rwlock_init( &mapper->lock ) replaced by rwlock_init( &mapper->lock , LOCK_MAPPER_STATE ).
- In mapper_get_page(), every rwlock_rd_lock / rwlock_rd_unlock / rwlock_wr_lock / rwlock_wr_unlock call is replaced by rwlock_rd_acquire / rwlock_rd_release / rwlock_wr_acquire / rwlock_wr_release. The logic is unchanged: the lock is taken in READ_MODE to search the radix tree, released and re-taken in WRITE_MODE when the page is missing, and a second test is made on the missing page because its status can have been modified in between; the page-allocation error path releases the write lock before returning NULL.
- The wait on the PG_INLOAD flag is rewritten as a single while( page_is_flag( page , PG_INLOAD ) == false ) loop calling sched_yield("waiting page loading") ("deschedule without blocking"), replacing the previous while(1) loop with an explicit break.
- Page removal from the mapper: rwlock_wr_lock / rwlock_wr_unlock replaced by rwlock_wr_acquire / rwlock_wr_release.
- The two calls to page_do_dirty( page ) in the data-move paths are replaced by ppm_page_do_dirty( page ), following the move of the dirty-page functions to ppm.c.
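mapper_get_page() keeps its read-to-write lock upgrade: the radix tree is first searched under the lock in READ_MODE and, on a miss, the read lock is released, the write lock is acquired, and the lookup is repeated, because another thread may have inserted the page in the unlocked window. The sketch below isolates that pattern with a POSIX readers-writer lock and a hypothetical single-slot cache, just to show why the second test is needed; it is not the mapper code (which also handles the PG_INLOAD wait and the error paths).

    #include <pthread.h>
    #include <stddef.h>

    // hypothetical single-slot cache protected by a readers-writer lock
    // (the lock is assumed initialized with pthread_rwlock_init())
    typedef struct cache_s
    {
        pthread_rwlock_t lock;
        void           * slot;
    } cache_t;

    // hypothetical helper: loads the missing object (e.g. from disk)
    extern void * slow_load( void );

    static void * cache_get( cache_t * cache )
    {
        // 1) fast path: look up under the read lock
        pthread_rwlock_rdlock( &cache->lock );
        void * obj = cache->slot;
        if( obj != NULL )
        {
            pthread_rwlock_unlock( &cache->lock );
            return obj;
        }

        // 2) miss: release the read lock, then take the write lock
        pthread_rwlock_unlock( &cache->lock );
        pthread_rwlock_wrlock( &cache->lock );

        // 3) second test: another thread may have filled the slot
        //    during the window where no lock was held
        obj = cache->slot;
        if( obj == NULL )
        {
            obj = slow_load();
            cache->slot = obj;
        }

        pthread_rwlock_unlock( &cache->lock );
        return obj;
    }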
trunk/kernel/mm/page.c
Changes from r486 to r567:

- Includes updated: <xlist.h>, <thread.h>, <scheduler.h>, <cluster.h>, <ppm.h> and <mapper.h> removed, <queuelock.h> added.
- page_init(): spinlock_init( &page->lock ) replaced by remote_busylock_init( XPTR( local_cxy , &page->lock ), LOCK_PAGE_STATE ); the initialization of the per-page wait_root xlist is removed.
- The functions page_do_dirty(), page_undo_dirty() and sync_all_pages() are removed from this file; they move to ppm.c as ppm_page_do_dirty(), ppm_page_undo_dirty() and ppm_sync_all_pages(), where the dirty list is protected by the PPM queuelock instead of a spinlock.
- The functions page_lock() and page_unlock(), which implemented the PG_LOCKED flag with a per-page queue of waiting threads (blocking on THREAD_BLOCKED_PAGE and waking the first waiter on unlock), are removed.
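page_init() now initializes the page lock through an extended pointer, remote_busylock_init( XPTR( local_cxy , &page->lock ) , LOCK_PAGE_STATE ), because the same lock may later be taken from a remote cluster, as ppm_sync_all_pages() and several vmm.c paths do. The sketch below shows one plausible way such an extended pointer can pack a cluster identifier and a local pointer into a single 64-bit value; the exact encoding belongs to the ALMOS-MKH HAL, so the field widths used here are an assumption.

    #include <stdint.h>

    typedef uint64_t xptr_sketch_t;   // extended pointer: cluster id + local pointer
    typedef uint32_t cxy_t;           // cluster identifier

    // pack a cluster identifier and a local pointer (widths are an assumption)
    static inline xptr_sketch_t xptr_make( cxy_t cxy , void * ptr )
    {
        return ((uint64_t)cxy << 32) | (uint32_t)(uintptr_t)ptr;
    }

    // extract the cluster identifier
    static inline cxy_t xptr_cxy( xptr_sketch_t xp )
    {
        return (cxy_t)(xp >> 32);
    }

    // extract the local pointer
    static inline void * xptr_ptr( xptr_sketch_t xp )
    {
        return (void *)(uintptr_t)(uint32_t)xp;
    }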
trunk/kernel/mm/page.h
Changes from r486 to r567:

- #include <spinlock.h> replaced by #include <remote_busylock.h>; <slist.h> and <xlist.h> removed.
- A TODO note is added to the flags section: the PG_BUFFER and PG_IO_ERR flags semantic is not defined.
- The PG_LOCKED flag (0x0080) is removed and PG_COW moves from 0x0100 to 0x0080.
- In the page_t descriptor: the wait_root xlist (root of the list of waiting threads) is removed, the spinlock_t lock ("protect the forks field") becomes remote_busylock_t lock ("protect all accesses to page"), the size note changes from 64 to 44 bytes for a 32-bit core, and the refcount usage remains flagged as a TODO.
- The prototypes of sync_all_pages(), page_do_dirty(), page_undo_dirty(), page_lock() and page_unlock() are removed (the dirty-page functions are now declared in ppm.h).
trunk/kernel/mm/ppm.c
Changes from r551 to r567:

- Author line extended: Alain Greiner (2016,2017) becomes Alain Greiner (2016,2017,2018).
- Includes updated: <spinlock.h> replaced by <busylock.h> and <queuelock.h>; <dqdt.h> replaced by <mapper.h>.
- Section banners added: "functions to translate [ page <-> base <-> ppn ]", "functions to allocate / release physical pages", "functions to handle dirty physical pages".
- ppm_alloc_pages(): the lock protecting the free lists is now taken with busylock_acquire( &ppm->free_lock ) and released with busylock_release(), replacing spinlock_lock_busy() / spinlock_unlock_busy(); the debug messages now print the thread trdid and process pid.
- ppm_free_pages(): spinlock_lock / spinlock_unlock replaced by busylock_acquire / busylock_release; debug messages updated; an "end ppm_free_pages()" comment is added.
- ppm_print(): spinlock_lock / spinlock_unlock replaced by busylock_acquire / busylock_release.
- New functions added, moved from page.c: ppm_page_do_dirty() sets the PG_DIRTY flag and registers the page in the PPM dirty list; ppm_page_undo_dirty() clears the flag and removes the page from the list; both protect the dirty list with queuelock_acquire / queuelock_release on &ppm->dirty_lock. ppm_sync_all_pages() scans the dirty list under the queuelock and, for each page, takes the page remote_busylock, calls vfs_mapper_move_page( page , false ) to flush it, then releases the page lock.
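The three functions moved here from page.c keep the same contract as before: marking a page dirty is idempotent, and the boolean result tells whether the call actually changed the page state. The self-contained model below (a POSIX mutex standing in for the PPM queuelock, a singly linked list standing in for the kernel's intrusive list_entry_t) only illustrates that idiom of "flag plus list membership, changed together under one lock"; it is not the kernel code, which in addition takes each page's own remote_busylock before flushing it through vfs_mapper_move_page().

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    // minimal model of a page: a dirty flag plus an intrusive list link
    typedef struct mpage_s
    {
        bool             dirty;
        struct mpage_s * next;    // link in the dirty list
    } mpage_t;

    // per-cluster dirty list, protected by a lock (stand-in for the PPM queuelock)
    static pthread_mutex_t dirty_lock = PTHREAD_MUTEX_INITIALIZER;
    static mpage_t       * dirty_root = NULL;

    // mark a page dirty; returns true if the call actually changed its state
    static bool do_dirty( mpage_t * page )
    {
        bool done = false;
        pthread_mutex_lock( &dirty_lock );
        if( !page->dirty )
        {
            page->dirty = true;
            page->next  = dirty_root;    // register page in the dirty list
            dirty_root  = page;
            done = true;
        }
        pthread_mutex_unlock( &dirty_lock );
        return done;
    }

    // flush and unregister all dirty pages; <write_back> is a stand-in for
    // vfs_mapper_move_page( page , false )
    static void sync_all( void (*write_back)( mpage_t * ) )
    {
        pthread_mutex_lock( &dirty_lock );
        while( dirty_root != NULL )
        {
            mpage_t * page = dirty_root;
            dirty_root     = page->next;
            page->dirty    = false;
            write_back( page );
        }
        pthread_mutex_unlock( &dirty_lock );
    }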
trunk/kernel/mm/ppm.h
Changes from r486 to r567:

- Header comment: "ppm.h - Per-cluster Physical Pages Manager Interface" becomes "ppm.h - Per-cluster Physical Pages Manager definition."; author line extended to (2016,2017,2018).
- #include <spinlock.h> replaced by #include <busylock.h> and #include <queuelock.h>.
- The structure comment is reworked: the low-level allocator implements the buddy algorithm, an allocated block being an integer number n of small pages where n is a power of 2 and ln(n) is called the order; the free_pages lists are protected by a local busylock because the allocator is also used by the idle_thread during kernel_init(); dirty pages are registered in a dirty_list rooted in the PPM and protected by a specific local queuelock, so that all dirty pages can be saved to disk.
- In the ppm_s structure: spinlock_t free_lock becomes busylock_t free_lock, and spinlock_t dirty_lock becomes queuelock_t dirty_lock.
- Section banners are added to group the prototypes (allocation / release, [ page <-> base <-> ppn ] translation, dirty-page handling).
- New prototypes with detailed comments: ppm_page_do_dirty() (takes the dirty_list queuelock, sets PG_DIRTY and registers the page if it was not dirty, returns true if the page was not dirty), ppm_page_undo_dirty() (the symmetric operation, returns true if the page was dirty), and ppm_sync_all_pages() (scans the PPM dirty list under the queuelock and flushes each page under its own lock; the dirty_list is empty when the operation completes).
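The reworked structure comment makes the allocator contract explicit: an allocated block is n contiguous small pages, n is a power of two, and ln(n) is the order handed to the PPM page allocator. The helper below shows how a requested byte size can be turned into such an order; the 4 KiB small-page size is an assumption, and a real kernel would use a bits.h primitive instead of the explicit loop.

    #include <stdint.h>
    #include <stdio.h>

    #define SMALL_PAGE_SIZE  4096u   // assumed small-page size

    // return the buddy order such that (1 << order) pages hold <bytes> bytes
    static uint32_t size_to_order( uint32_t bytes )
    {
        // number of small pages needed, rounded up
        uint32_t pages = (bytes + SMALL_PAGE_SIZE - 1) / SMALL_PAGE_SIZE;

        // smallest power of two >= pages, and its log2
        uint32_t order = 0;
        while( (1u << order) < pages ) order++;
        return order;
    }

    int main( void )
    {
        // 1 byte -> order 0 (1 page), 8 KiB -> order 1 (2 pages),
        // 20 KiB -> order 3 (5 pages rounded up to 8)
        printf( "%u %u %u\n",
                size_to_order( 1 ),
                size_to_order( 8192 ),
                size_to_order( 20480 ) );
        return 0;
    }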
trunk/kernel/mm/vmm.c
Changes from r561 to r567:

- Includes updated: <rwlock.h> replaced by <remote_rwlock.h> and <remote_queuelock.h>.
- vmm_init(): the VSL lock is initialized with remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL ); the configuration asserts are reformatted and now use CONFIG_THREADS_MAX_PER_CLUSTER; the STACK and MMAP allocator locks become busylocks, initialized with LOCK_VMM_STACK and LOCK_VMM_MMAP; the debug messages print the calling thread trdid and process pid.
- vmm_display(): remote_rwlock_rd_lock / rd_unlock replaced by remote_rwlock_rd_acquire / rd_release.
- New functions vmm_vseg_attach() and vmm_vseg_detach(): each takes the VSL remote_rwlock in write mode, updates the vseg->vmm field, adds the vseg to (or removes it from) the VSL xlist, and releases the lock. They replace vseg_attach() / vseg_detach() from vseg.c, and the various callers no longer take the VSL lock themselves.
- In the function that sets the pending-forks counters for all pages of the reference VSL, the increment of the "forks" counter is now bracketed by remote_busylock_acquire / remote_busylock_release on the page lock instead of a remote_spinlock; the "cluster is reference" asserts are reformatted.
- vmm_fork_copy(): the child VSL lock is initialized with remote_rwlock_init( ... , LOCK_VMM_STACK ); the parent VSL lock is taken and released with remote_rwlock_rd_acquire / rd_release; remote reads use hal_remote_l32() instead of hal_remote_lw(); child vsegs are registered with vmm_vseg_attach(); the "forks" increment is protected by the page remote_busylock.
- vmm_destroy(): uses XLIST_FIRST() instead of XLIST_FIRST_ELEMENT() and vmm_vseg_detach() instead of vseg_detach(), and no longer takes the VSL lock directly, for both the VSL scan and the zombi lists.
- Stack allocator: the manager lock is a busylock; on failure the slot allocation now returns 0xFFFFFFFF instead of ENOMEM.
- MMAP allocator, vseg creation and vseg removal: spinlock_lock / spinlock_unlock on the stack and mmap managers replaced by busylock_acquire / busylock_release; attaching and detaching a vseg now goes through vmm_vseg_attach() / vmm_vseg_detach() instead of taking the VSL write lock around vseg_attach() / vseg_detach().
- In the vseg unmap path, the "small page" assert is reformatted, and the remote_spinlock that previously protected the "forks" counter is removed: the counter is read with hal_remote_l32() and handled without taking the page lock.
- vmm_get_vseg() and vmm_resize_vseg(): remote_rwlock rd/wr lock and unlock calls replaced by the acquire / release variants; a locally-created vseg copy is registered with vmm_vseg_attach( &process->vmm , vseg ).
- Page allocation for a vseg: the "illegal vseg type" assert is reformatted, and a commented-out line suggests falling back to cluster_random_select() when the target cluster is not valid.
- vmm_get_one_ppn(): the asserts on mapper_xp for FILE and CODE/DATA vsegs are reformatted.
- vmm_get_pte(): the debug-only this pointer moves inside the DEBUG block; in the copy-on-write case, the lock taken around the "forks" counter test and the possible page copy is now the page remote_busylock (remote_busylock_acquire / remote_busylock_release) instead of a remote_spinlock, and the counter is read with hal_remote_l32().
- Comment fix in vmm_handle_cow(): "get reference process cluster" becomes "get relevant process cluster", since for private (CODE and DATA) vsegs the relevant process descriptor is the local one.
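One pattern worth isolating from this file is the copy-on-write resolution in vmm_get_pte(): the "forks" counter of the physical page is examined under the page lock (now a remote_busylock); if it is non-zero a new page is allocated and the old contents copied, otherwise the existing page is reused, and the PTE is then rebuilt with COW cleared and WRITABLE set. The toy function below condenses that decision over a local page descriptor, with a POSIX mutex standing in for the remote busylock and memcpy() for the physical copy; the forks decrement and the error handling are this sketch's own reading of the logic, and GPT updates and cross-cluster accesses are omitted entirely.

    #include <pthread.h>
    #include <string.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096

    // toy page descriptor: payload, pending-forks counter, and the lock
    // protecting that counter (stand-in for the page remote_busylock)
    typedef struct tpage_s
    {
        unsigned char   data[PAGE_SIZE];
        unsigned        forks;   // number of child processes still sharing the page
        pthread_mutex_t lock;
    } tpage_t;

    // handle a write fault on a COW page: return the page the writer must use,
    // or NULL if a private copy was needed but could not be allocated
    static tpage_t * cow_resolve( tpage_t * old_page )
    {
        tpage_t * new_page;

        pthread_mutex_lock( &old_page->lock );

        if( old_page->forks )        // still shared: give the writer a private copy
        {
            new_page = malloc( sizeof(tpage_t) );
            if( new_page == NULL )
            {
                pthread_mutex_unlock( &old_page->lock );
                return NULL;
            }
            memcpy( new_page->data , old_page->data , PAGE_SIZE );
            new_page->forks = 0;
            pthread_mutex_init( &new_page->lock , NULL );

            old_page->forks--;       // one sharer less on the old page
        }
        else                         // not shared anymore: reuse the old page
        {
            new_page = old_page;
        }

        pthread_mutex_unlock( &old_page->lock );

        // the caller would now rebuild the PTE for new_page with the COW
        // attribute cleared and the WRITABLE attribute set
        return new_page;
    }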
trunk/kernel/mm/vmm.h
Changes from r469 to r567:

- #include <spinlock.h> replaced by #include <queuelock.h>.
- In stack_mgr_s and mmap_mgr_s, the spinlock_t lock fields become busylock_t lock.
- The VMM structure comment is updated: the local VSL and GPT copies are only complete in the reference cluster; the vsegs_lock comment becomes "lock protecting the local VSL".
- New prototypes vmm_vseg_attach() and vmm_vseg_detach(), which add / remove a vseg descriptor in the VSL of a given VMM, update the vmm field in the vseg descriptor, and take the lock protecting the VSL themselves.
trunk/kernel/mm/vseg.c
Changes from r503 to r567:

- #include <spinlock.h> removed.
- vseg_init_from_ref(): the remote reads use the explicit-width accessors hal_remote_l32() (instead of hal_remote_lw()) for the 32-bit fields and hal_remote_l64() (instead of hal_remote_lwd()) for the 64-bit mapper_xp field.
- The vseg_attach() and vseg_detach() functions are removed: they are replaced by vmm_vseg_attach() and vmm_vseg_detach() in vmm.c, which take the VSL lock themselves.
trunk/kernel/mm/vseg.h
Changes from r503 to r567:

- #include <spinlock.h> removed.
- The prototypes of vseg_attach() and vseg_detach() are removed (replaced by vmm_vseg_attach() / vmm_vseg_detach(), declared in vmm.h).