Changeset 656 for trunk/kernel/mm
Timestamp: Dec 6, 2019, 12:07:51 PM
Location:  trunk/kernel/mm
Files:     10 edited
trunk/kernel/mm/kcm.c (r635 → r656)

      {
          // get one 4 Kbytes page from remote PPM
 -        page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 );
 -
 -        if( page == NULL )
 +        xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 );
 +
 +        if( page_xp == XPTR_NULL )
          {
              printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
      ...
          }

 -        // get remote page base address
 -        xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) );
 +        // get extended pointer on allocated buffer
 +        xptr_t base_xp = ppm_page2base( page_xp );

          // get local pointer on kcm_page
      ...
          hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 );
          hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm )    , kcm_ptr );
 -        hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , page );
 +        hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , GET_PTR( page_xp ) );

          // introduce new page in remote KCM active_list
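This kcm.c hunk is the first of several call sites adapted to the new ppm_remote_alloc_pages() signature, which now returns a single extended pointer (xptr_t) instead of a local page_t pointer that had to be rewrapped with XPTR(). The standalone C sketch below models that calling convention; the macro names (XPTR, GET_CXY, GET_PTR, XPTR_NULL) are taken from the diff, but the bit packing (cluster id in the top 16 bits, chosen so the example round-trips on a typical 64-bit host) is an assumption and differs from the kernel's real encoding.

#include <stdio.h>
#include <stdint.h>

/* Standalone model of ALMOS-MKH extended pointers. The 16/48 packing below
   is illustrative only; the real kernel encoding differs. */
typedef uint64_t xptr_t;
typedef uint32_t cxy_t;

#define XPTR_NULL        ((xptr_t)0)
#define XPTR( cxy, ptr ) ((((xptr_t)(cxy)) << 48) | (uintptr_t)(ptr))
#define GET_CXY( xp )    ((cxy_t)((xp) >> 48))
#define GET_PTR( xp )    ((void *)(uintptr_t)((xp) & 0xFFFFFFFFFFFFULL))

int main( void )
{
    int   page;                            /* stands in for a page_t descriptor */
    cxy_t kcm_cxy = 0x3;                   /* hypothetical remote cluster       */

    /* r656 convention: the allocator returns one xptr_t carrying both the
       cluster identifier and the local pointer */
    xptr_t page_xp = XPTR( kcm_cxy , &page );

    if( page_xp == XPTR_NULL ) return -1;  /* failure test (was: page == NULL)  */

    /* callers recover the local pointer only where they actually need it */
    printf( "cxy %x / local ptr %p\n",
            (unsigned)GET_CXY( page_xp ), GET_PTR( page_xp ) );
    return 0;
}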
trunk/kernel/mm/kmem.c (r635 → r656)

      flags = req->flags;

 -////////////////////////////////// PPM
 +//////////////////////
      if( type == KMEM_PPM )
      {
  ...
          return ptr;
      }
 -////////////////////////////////////// KCM
 +///////////////////////////
      else if( type == KMEM_KCM )
      {
  ...
          return ptr;
      }
 -///////////////////////////////////// KHM
 +///////////////////////////
      else if( type == KMEM_KHM )
      {
  ...
      uint32_t type = req->type;

 +//////////////////////
      if( type == KMEM_PPM )
      {
  ...
          ppm_free_pages( page );
      }
 +///////////////////////////
      else if( type == KMEM_KCM )
      {
          kcm_free( req->ptr );
      }
 +///////////////////////////
      else if( type == KMEM_KHM )
      {
  ...
      flags = req->flags;

 -///////////////////////////////// PPM
 +//////////////////////
      if( type == KMEM_PPM )
      {
 -        // allocate the number of requested pages
 -        page_t * page_ptr = ppm_remote_alloc_pages( cxy , order );
 -
 -        if( page_ptr == NULL )
 +        // allocate the number of requested pages from remote cluster
 +        xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );
 +
 +        if( page_xp == XPTR_NULL )
          {
              printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n",
  ...
          }

 -        xptr_t page_xp = XPTR( cxy , page_ptr );
 -
 -        // get pointer on buffer from the page descriptor
 +        // get extended pointer on remote buffer
          xptr_t base_xp = ppm_page2base( page_xp );
  ...
          if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );

 -        void * ptr = GET_PTR( base_xp );

  #if DEBUG_KMEM_REMOTE
  ...
          printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
          __FUNCTION__, this->process->pid, this->trdid,
 -        1<<order, ppm_page2ppn( XPTR(local_cxy,ptr) ), cxy, cycle );
 +        1<<order, ppm_page2ppn( page_xp ), cxy, cycle );
  #endif
 -        return ptr;
 +        return GET_PTR( base_xp );
      }
 -///////////////////////////////// KCM
 +///////////////////////////
      else if( type == KMEM_KCM )
      {
  ...
          return ptr;
      }
 -///////////////////////////////// KHM
 +///////////////////////////
      else if( type == KMEM_KHM )
      {
  ...
      uint32_t type = req->type;

 +//////////////////////
      if( type == KMEM_PPM )
      {
  ...
          ppm_remote_free_pages( cxy , page );
      }
 +///////////////////////////
      else if( type == KMEM_KCM )
      {
          kcm_remote_free( cxy , req->ptr );
      }
 +///////////////////////////
      else if( type == KMEM_KHM )
      {
trunk/kernel/mm/kmem.h (r635 → r656)

  /*************************************************************************************
 - * This enum defines the three Kernel Memory Allocaror types :
 + * This enum defines the three Kernel Memory Allocaror types
   ************************************************************************************/
  ...
   * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes,
   *   M can have any value, and req.order = M.
 + *
 + * WARNING: the physical memory allocated with a given allocator type must be
 + * released using the same allocator type.
   *************************************************************************************
   * @ cxy : target cluster identifier for a remote access.
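The new WARNING in kmem.h states the pairing rule that the rest of this changeset enforces (for instance, vmm.c below now frees with a KMEM_PPM request what it allocated with a KMEM_PPM request). The standalone sketch below illustrates the rule; the enum values mirror the diff (KMEM_PPM / KMEM_KCM / KMEM_KHM), while the tracking wrapper and its names are purely illustrative, not kernel code.

#include <assert.h>
#include <stdlib.h>

/* Allocator types as defined in kmem.h */
typedef enum { KMEM_PPM, KMEM_KCM, KMEM_KHM } kmem_type_t;

/* Illustrative wrapper: remember which allocator produced a buffer,
   and refuse to release it through a different one. */
typedef struct { kmem_type_t type; void * ptr; } tracked_t;

static tracked_t tracked_alloc( kmem_type_t type, size_t size )
{
    tracked_t t = { type, malloc( size ) };   /* malloc stands in for kmem */
    return t;
}

static void tracked_free( tracked_t t, kmem_type_t type )
{
    /* the pairing rule from the kmem.h WARNING */
    assert( (t.type == type) && "allocator type mismatch on release" );
    free( t.ptr );
}

int main( void )
{
    tracked_t buf = tracked_alloc( KMEM_KCM, 64 );  /* small KCM buffer      */
    tracked_free( buf, KMEM_KCM );                  /* must also be KMEM_KCM */
    return 0;
}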
trunk/kernel/mm/mapper.c (r651 → r656)

  #include <hal_special.h>
  #include <hal_uspace.h>
 +#include <hal_vmm.h>
  #include <grdxt.h>
  #include <string.h>
  ...
      error_t error;

 +    uint32_t inode_size;
 +    uint32_t inode_type;
 +
      thread_t * this = CURRENT_THREAD;

      // get target mapper cluster and local pointer
 -    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
 -    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
 +    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
 +    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
 +
 +    // get inode pointer
 +    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
 +
 +    // get inode size and type if relevant
 +    if( inode != NULL )
 +    {
 +        inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
 +        inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
 +    }
 +    else
 +    {
 +        inode_size = 0;
 +        inode_type = 0;
 +    }

  #if DEBUG_MAPPER_HANDLE_MISS
      uint32_t cycle = (uint32_t)hal_get_cycles();
      char name[CONFIG_VFS_MAX_NAME_LENGTH];
 -    vfs_inode_t * inode = mapper->inode;
      if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
      {
 -        vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
 -        printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cluster %x / cycle %d",
 +        vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
 +        printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cxy %x / cycle %d\n",
          __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle );
 -        if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
      }
      if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
      {
 -        printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cluster %x / cycle %d",
 +        printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cxy %x / cycle %d\n",
          __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle );
 -        if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
 +    }
 +#endif
 +
 +#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
 +    if( DEBUG_MAPPER_HANDLE_MISS < cycle )
 +    {
 +        if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
 +        else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
      }
  #endif

      // allocate one 4 Kbytes page from the remote mapper cluster
 -    page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 );
 +    xptr_t   page_xp  = ppm_remote_alloc_pages( mapper_cxy , 0 );
 +    page_t * page_ptr = GET_PTR( page_xp );

 -    if( page_ptr == NULL )
 +    if( page_xp == XPTR_NULL )
      {
          printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
  ...
      }

 -    // build extended pointer on new page descriptor
 -    xptr_t page_xp = XPTR( mapper_cxy , page_ptr );
 -
      // initialize the page descriptor
      page_remote_init( page_xp );

 +    // initialize specific page descriptor fields
      hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1 );
      hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index )    , page_id );
  ...
      }

 -    // launch I/O operation to load page from IOC device to mapper
 -    error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
 -
 -    if( error )
 -    {
 -        printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
 -        __FUNCTION__ , this->process->pid, this->trdid );
 -        mapper_remote_release_page( mapper_xp , page_ptr );
 -        return -1;
 +    // launch I/O operation to load page from IOC device when required:
 +    // - it is the FAT mapper
 +    // - it is a directory mapper
 +    // - it is a file mapper, and it exist data on IOC device for this page
 +    if( (inode == NULL) || (inode_type == INODE_TYPE_DIR) || (inode_size > (page_id << 10)) )
 +    {
 +        error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
 +
 +        if( error )
 +        {
 +            printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
 +            __FUNCTION__ , this->process->pid, this->trdid );
 +            mapper_remote_release_page( mapper_xp , page_ptr );
 +            return -1;
 +        }
      }
  ...
  #if DEBUG_MAPPER_HANDLE_MISS
 -    cycle = (uint32_t)hal_get_cycles();
 +    ppn_t ppn = ppm_page2ppn( page_xp );
      if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
      {
 -        printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
 -        __FUNCTION__, this->process->pid, this->trdid,
 -        page_id, name, ppm_page2ppn( page_xp ), cycle );
 -        if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
 +        printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / page %x / ppn %x\n",
 +        __FUNCTION__, this->process->pid, this->trdid, page_id, name, page_ptr, ppn );
      }
      if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
      {
 -        printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
 -        __FUNCTION__, this->process->pid, this->trdid,
 -        page_id, ppm_page2ppn( page_xp ), cycle );
 -        if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
 +        printk("\n[%s] thread[%x,%x] exit for page %d in FAT / page %x / ppn %x\n",
 +        __FUNCTION__, this->process->pid, this->trdid, page_id, page_ptr, ppn );
 +    }
 +#endif
 +
 +#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
 +    if( DEBUG_MAPPER_HANDLE_MISS < cycle )
 +    {
 +        if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
 +        else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
      }
  #endif
  ...
  {
      error_t error;
 -    mapper_t * mapper_ptr;
 -    cxy_t      mapper_cxy;
 -    xptr_t     lock_xp;      // extended pointer on mapper lock
 -    xptr_t     page_xp;      // extended pointer on searched page descriptor
 -    xptr_t     rt_xp;        // extended pointer on radix tree in mapper

      thread_t * this = CURRENT_THREAD;

      // get mapper cluster and local pointer
 -    mapper_ptr = GET_PTR( mapper_xp );
 -    mapper_cxy = GET_CXY( mapper_xp );
 +    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
 +    cxy_t      mapper_cxy = GET_CXY( mapper_xp );

  #if DEBUG_MAPPER_GET_PAGE
  ...
  #endif

 +#if( DEBUG_MAPPER_GET_PAGE & 2 )
 +    if( DEBUG_MAPPER_GET_PAGE < cycle )
 +    ppm_remote_display( local_cxy );
 +#endif
 +
      // check thread can yield
      thread_assert_can_yield( this , __FUNCTION__ );

      // build extended pointer on mapper lock and mapper rt
 -    lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );
 -    rt_xp   = XPTR( mapper_cxy , &mapper_ptr->rt );
 +    xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );
 +    xptr_t rt_xp   = XPTR( mapper_cxy , &mapper_ptr->rt );

      // take mapper lock in READ_MODE
  ...
      // search page in radix tree
 -    page_xp = grdxt_remote_lookup( rt_xp , page_id );
 +    xptr_t page_xp = grdxt_remote_lookup( rt_xp , page_id );

      // test mapper miss
  ...
  #if (DEBUG_MAPPER_GET_PAGE & 1)
 -        if( DEBUG_MAPPER_GET_PAGE < cycle )
 -        printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n",
 -        __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
 +        if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
 +        {
 +            printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n",
 +            __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) );
 +        }
 +        if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
 +        {
 +            printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n",
 +            __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
 +        }
  #endif
  ...
      if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
      {
 -        printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
 -        __FUNCTION__, this->process->pid, this->trdid, page_id,
 -        name, ppm_page2ppn(page_xp), cycle );
 +        printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
 +        __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
      }
      if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
      {
 -        printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x / cycle %d\n",
 -        __FUNCTION__, this->process->pid, this->trdid, page_id,
 -        ppm_page2ppn(page_xp), cycle );
 -    }
 +        printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x\n",
 +        __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
 +    }
 +#endif
 +
 +#if( DEBUG_MAPPER_GET_PAGE & 2)
 +    if( DEBUG_MAPPER_GET_PAGE < cycle )
 +    ppm_remote_display( local_cxy );
  #endif
  ...
      __FUNCTION__, this->process->pid, this->trdid, page_bytes,
      local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
 -    mapper_display_page( mapper_xp , page_id , 128 );
 +    mapper_display_page( mapper_xp , page_xp , 128 );
  #endif
  ...
      {
          if( to_buffer )
 -        printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",
 +        printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",
          __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
          else
 -        printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",
 +        printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",
          __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
      }
  ...
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
 -    printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
 -    __FUNCTION__, this->process->pid, this->trdid, cycle );
 +    printk("\n[%s] thread[%x,%x] exit / mapper <%s> / buffer (%x,%x) / cycle %d\n",
 +    __FUNCTION__, this->process->pid, this->trdid, name, buffer_cxy, buffer_ptr, cycle );
  #endif
  ...
      if( page == NULL ) break;

 -    assert( (page->index == found_key ), " wrong page descriptor index");
 -    assert( (page->order == 0), "mapper page order must be 0");
 +    assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key );
 +    assert( (page->order == 0), "page_order (%d] != 0", page->order );

      // build extended pointer on page descriptor
  ...
  } // end mapper_sync()

 -//////////////////////////////////////////////////
 -error_t mapper_display_page( xptr_t   mapper_xp,
 -                             uint32_t page_id,
 -                             uint32_t nbytes )
 -{
 -    xptr_t     page_xp;        // extended pointer on page descriptor
 -    xptr_t     base_xp;        // extended pointer on page base
 +///////////////////////////////////////////////
 +void mapper_display_page( xptr_t   mapper_xp,
 +                          xptr_t   page_xp,
 +                          uint32_t nbytes )
 +{
      char       buffer[4096];   // local buffer
 -    uint32_t * tabi;           // pointer on uint32_t to scan buffer
      uint32_t   line;           // line index
      uint32_t   word;           // word index
 -    cxy_t      mapper_cxy;     // mapper cluster identifier
 -    mapper_t * mapper_ptr;     // mapper local pointer
 -    vfs_inode_t * inode_ptr;   // inode local pointer

      char name[CONFIG_VFS_MAX_NAME_LENGTH];

 -    if( nbytes > 4096)
 -    {
 -        printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
 -        __FUNCTION__, nbytes );
 -        return -1;
 -    }
 -
 -    // get extended pointer on page descriptor
 -    page_xp = mapper_remote_get_page( mapper_xp , page_id );
 -
 -    if( page_xp == XPTR_NULL)
 -    {
 -        printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
 -        __FUNCTION__, page_id );
 -        return -1;
 -    }
 -
 -    // get cluster and local pointer
 -    mapper_cxy = GET_CXY( mapper_xp );
 -    mapper_ptr = GET_PTR( mapper_xp );
 +    assert( (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
 +    assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
 +    assert( (page_xp   != XPTR_NULL) , "page_xp argument cannot be null");
 +
 +    // get mapper cluster and local pointer
 +    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
 +    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
 +
 +    // get page cluster an local pointer
 +    cxy_t    page_cxy = GET_CXY( page_xp );
 +    page_t * page_ptr = GET_PTR( page_xp );
 +
 +    // get page_id and mapper from page descriptor
 +    uint32_t   page_id = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
 +    mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );
 +
 +    assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
 +    assert( (mapper_ptr == mapper )   , "unconsistent mapper_xp & page_xp arguments");

      // get inode
 -    inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
 +    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );

      // get inode name
 -    if( inode_ptr == NULL ) strcpy( name , " fat" );
 +    if( inode_ptr == NULL ) strcpy( name , "FAT" );
      else vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );

      // get extended pointer on page base
 -    base_xp = ppm_page2base( page_xp );
 +    xptr_t base_xp = ppm_page2base( page_xp );

      // copy remote page to local buffer
      hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );

 +    // display header
 +    uint32_t * tabi = (uint32_t *)buffer;
 +    printk("\n***** mapper <%s> / page_id %d / cxy %x / mapper %x / buffer %x\n",
 +    name, page_id, mapper_cxy, mapper_ptr, GET_PTR( base_xp ) );
 +
      // display 8 words per line
 -    tabi = (uint32_t *)buffer;
 -    printk("\n***** mapper <%s> / %d bytes in page %d (%x,%x)\n",
 -    name, nbytes, page_id, GET_CXY(base_xp), GET_PTR(base_xp) );
      for( line = 0 ; line < (nbytes >> 5) ; line++ )
      {
  ...
      }

 -    return 0;
 -
 -} // end mapper_display_page
 +} // end mapper_display_page()
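The key functional change in mapper.c is the new guard in mapper_handle_miss(): a freshly allocated mapper page is loaded from the IOC device only for the FAT mapper (inode == NULL), a directory mapper, or a file page that actually has data on the device; other pages (entirely beyond the data stored on device) skip the I/O. The standalone model below reproduces that predicate verbatim, including the (page_id << 10) shift copied from the changeset, which compares against 1024-byte units rather than the 4 Kbytes page size and therefore can only over-approximate the set of pages to load; INODE_TYPE_DIR's value here is a placeholder.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INODE_TYPE_DIR 1   /* placeholder value for the directory inode type */

/* Model of the r656 condition in mapper_handle_miss() */
static bool must_load_from_device( bool     is_fat_mapper,  /* inode == NULL */
                                   uint32_t inode_type,
                                   uint32_t inode_size,
                                   uint32_t page_id )
{
    return is_fat_mapper ||
           (inode_type == INODE_TYPE_DIR) ||
           (inode_size > (page_id << 10));
}

int main( void )
{
    /* a 3000-byte regular file: page 0 holds data on device,
       page 8 (bytes 32768..) is far past the end of the file */
    printf( "page 0 -> load = %d\n", must_load_from_device( false, 0, 3000, 0 ) );
    printf( "page 8 -> load = %d\n", must_load_from_device( false, 0, 3000, 8 ) );
    return 0;
}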
trunk/kernel/mm/mapper.h (r635 → r656)

   * and the allocated memory is only released when the mapper/inode is destroyed.
   *
 - * TODO (1) the mapper being only used to implement the VFS cache(s), the mapper.c
 - *      and mapper.h file should be trandfered to the fs directory.
 - * TODO (2) the "type" field in mapper descriptor is redundant and probably unused.
 + * TODO the "type" field in mapper descriptor is redundant and probably unused.
   ******************************************************************************************/
  ...
  /********************************************************************************************
 - * This function move data between a remote mapper, identified by the <mapper_xp> argument,
 - * and a localised remote kernel buffer. It can be called by a thread running any cluster.
 + * This function move <size> bytes from/to a remote mapper, identified by the <mapper_xp>
 + * argument, to/from a remote kernel buffer, identified by the <buffer_xp> argument.
 + * It can be called by a thread running in any cluster.
   * If required, the data transfer is split in "fragments", where one fragment contains
 - * contiguous bytes in the same mapper page.
 - * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer.
 - * In case of write, the dirty bit is set for all pages written in the mapper.
 + * contiguous bytes in the same mapper page. Each fragment uses a "remote_memcpy".
 + * In case of write to mapper, the dirty bit is set for all pages written in the mapper.
   *******************************************************************************************
   * @ mapper_xp : extended pointer on mapper.
  ...
  /*******************************************************************************************
 - * This debug function displays the content of a given page of a given mapper.
 - * - the mapper is identified by the <mapper_xp> argument.
 - * - the page is identified by the <page_id> argument.
 - * - the number of bytes to display in page is defined by the <nbytes> argument.
 + * This debug function displays the content of a given page of a given mapper, identified
 + * by the <mapper_xp> and <page_xp> arguments.
 + * The number of bytes to display in page is defined by the <nbytes> argument.
   * The format is eigth (32 bits) words per line in hexadecimal.
   * It can be called by any thread running in any cluster.
 - * In case of miss in mapper, it load the missing page from device to mapper.
   *******************************************************************************************
   * @ mapper_xp : [in] extended pointer on the mapper.
 - * @ page_id   : [in] page index in file.
 - * @ nbytes    : [in] value to be written.
 - * @ returns 0 if success / return -1 if error.
 - ******************************************************************************************/
 -error_t mapper_display_page( xptr_t   mapper_xp,
 -                             uint32_t page_id,
 -                             uint32_t nbytes );
 + * @ page_xp   : [in] extended pointer on page descriptor.
 + * @ nbytes    : [in] number of bytes in page.
 + * @ returns 0 if success / return -1 if error.
 + ******************************************************************************************/
 +void mapper_display_page( xptr_t   mapper_xp,
 +                          xptr_t   page_xp,
 +                          uint32_t nbytes );
trunk/kernel/mm/page.h (r635 → r656)

   * - The remote_busylock is used to allows any remote thread to atomically
   *   test/modify the forks counter or the flags.
 - * - The list entry is used to register the page in a free list or in dirty list.
 - *   The refcount is used for page release to KMEM.
 + * - The list field is used to register the page in a free list, or in dirty list,
 + *   as a given page cannot be simultaneously dirty and free.
 + * - The refcount is used to release the page to the PPM.
   * NOTE: the size is 48 bytes for a 32 bits core.
   ************************************************************************************/
trunk/kernel/mm/ppm.c (r651 → r656)

      page_t * buddy;            // searched buddy page descriptor
      uint32_t buddy_index;      // buddy page index in page_tbl[]
 -    page_t * current;          // current (merged) page descriptor
 +    page_t * current_ptr;      // current (merged) page descriptor
      uint32_t current_index;    // current (merged) page index in page_tbl[]
      uint32_t current_order;    // current (merged) page order
  ...
      // initialise loop variables
 -    current       = page;
 +    current_ptr   = page;
      current_order = page->order;
      current_index = page - ppm->pages_tbl;
  ...
          buddy->order = 0;

 -        // compute next (merged) page index in page_tbl[]
 +        // compute next values for loop variables
          current_index &= buddy_index;
 -
 -        // compute next (merged) page order
          current_order++;
 -
 -        // compute next (merged) page descripror
 -        current = pages_tbl + current_index;
 +        current_ptr = pages_tbl + current_index;
      }

      // update order field for merged page descriptor
 -    current->order = current_order;
 +    current_ptr->order = current_order;

      // insert merged page in relevant free list
 -    list_add_first( &ppm->free_pages_root[current_order] , &current->list );
 +    list_add_first( &ppm->free_pages_root[current_order] , &current_ptr->list );
      ppm->free_pages_nr[current_order] ++;

  } // end ppm_free_pages_nolock()
  ...
      thread_t * this = CURRENT_THREAD;

 +    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 +
  #if DEBUG_PPM_ALLOC_PAGES
      uint32_t cycle = (uint32_t)hal_get_cycles();
  #endif

 -#if (DEBUG_PPM_ALLOC_PAGES & 1)
 +#if DEBUG_PPM_ALLOC_PAGES
      if( DEBUG_PPM_ALLOC_PAGES < cycle )
      {
          printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
          __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
 -        ppm_remote_display( local_cxy );
 -    }
 -#endif
 -
 -    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 +        if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
 +    }
 +#endif

      // check order
  ...
      dqdt_increment_pages( local_cxy , order );

 +    hal_fence();
 +
  #if DEBUG_PPM_ALLOC_PAGES
      if( DEBUG_PPM_ALLOC_PAGES < cycle )
  ...
          __FUNCTION__, this->process->pid, this->trdid,
          1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
 -        ppm_remote_display( local_cxy );
 +        if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
      }
  #endif
  ...
  #endif

 -#if ( DEBUG_PPM_FREE_PAGES & 1 )
 +#if DEBUG_PPM_FREE_PAGES
      if( DEBUG_PPM_FREE_PAGES < cycle )
      {
  ...
          __FUNCTION__, this->process->pid, this->trdid,
          1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
 -        ppm_remote_display( local_cxy );
 +        if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
 +    }
  #endif
  ...
      // update DQDT
      dqdt_decrement_pages( local_cxy , page->order );
 +
 +    hal_fence();
  ...
          __FUNCTION__, this->process->pid, this->trdid,
          1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
 -        ppm_remote_display( local_cxy );
 +        if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
      }
  #endif
  ...
 -void * ppm_remote_alloc_pages( cxy_t    cxy,
 -                               uint32_t order )
 +xptr_t ppm_remote_alloc_pages( cxy_t    cxy,
 +                               uint32_t order )
  {
  ...
      thread_t * this = CURRENT_THREAD;

 +    // check order
 +    assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
 +
 +    // get local pointer on PPM (same in all clusters)
 +    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 +
  #if DEBUG_PPM_REMOTE_ALLOC_PAGES
      uint32_t cycle = (uint32_t)hal_get_cycles();
  #endif

 -#if ( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 )
 +#if DEBUG_PPM_REMOTE_ALLOC_PAGES
      if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
      {
 -        printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
 +        printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
          __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
 -        ppm_remote_display( cxy );
 -    }
 -#endif
 -
 -    // check order
 -    assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
 -
 -    // get local pointer on PPM (same in all clusters)
 -    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 +        if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
 +    }
 +#endif

      //build extended pointer on lock protecting remote PPM
  ...
      dqdt_increment_pages( cxy , order );

 +    hal_fence();
 +
  #if DEBUG_PPM_REMOTE_ALLOC_PAGES
      if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
  ...
          __FUNCTION__, this->process->pid, this->trdid,
          1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
 -        ppm_remote_display( cxy );
 -    }
 -#endif
 -
 -    return found_block;
 +        if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
 +    }
 +#endif
 +
 +    return XPTR( cxy , found_block );

  } // end ppm_remote_alloc_pages()
  ...
      uint32_t current_order;    // current (merged) page order

 +    // get local pointer on PPM (same in all clusters)
 +    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 +
 +    // get page ppn and order
 +    uint32_t order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
 +
  #if DEBUG_PPM_REMOTE_FREE_PAGES
      thread_t * this  = CURRENT_THREAD;
      uint32_t   cycle = (uint32_t)hal_get_cycles();
 -#endif
 -
 -#if ( DEBUG_PPM_REMOTE_FREE_PAGES & 1 )
 +    ppn_t      ppn   = ppm_page2ppn( XPTR( page_cxy , page_ptr ) );
 +#endif
 +
 +#if DEBUG_PPM_REMOTE_FREE_PAGES
      if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
      {
          printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
 -        __FUNCTION__, this->process->pid, this->trdid,
 -        1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr )), cycle );
 -        ppm_remote_display( page_cxy );
 +        __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
 +        if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
      }
  #endif
  ...
      page_xp = XPTR( page_cxy , page_ptr );

 -    // get local pointer on PPM (same in all clusters)
 -    ppm_t * ppm = &LOCAL_CLUSTER->ppm;
 -
      // build extended pointer on lock protecting remote PPM
      xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );
  ...
      // initialise loop variables
      current_ptr   = page_ptr;
 -    current_order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
 +    current_order = order;
      current_index = page_ptr - ppm->pages_tbl;
  ...
          hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );

 -        // compute next (merged) page index in page_tbl[]
 +        // compute next values for loop variables
          current_index &= buddy_index;
 -
 -        // compute next (merged) page order
          current_order++;
 -
 -        // compute next (merged) page descripror
 -        current_ptr = pages_tbl + current_index;
 +        current_ptr = pages_tbl + current_index;
      }

      // update current (merged) page descriptor order field
 -    current_ptr = pages_tbl + current_index;
      hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );

      // insert current (merged) page into relevant free list
 -    list_remote_add_first( page_cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
 +    list_remote_add_first( page_cxy, &ppm->free_pages_root[current_order], &current_ptr->list );
      hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );
  ...
      dqdt_decrement_pages( page_cxy , page_ptr->order );

 +    hal_fence();
 +
  #if DEBUG_PPM_REMOTE_FREE_PAGES
      if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
      {
          printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
 -        __FUNCTION__, this->process->pid, this->trdid,
 -        1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ), cycle );
 -        ppm_remote_display( page_cxy );
 +        __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
 +        if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
      }
  #endif
  ...
      uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );

 -    // display direct free_list[order]
 -    nolock_printk("- forward  : order = %d / n = %d\t: ", order , n );
 +    // display forward free_list[order]
 +    nolock_printk("- forward  : order = %d / n = %d : ", order , n );
      LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
 +    {
 +        page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
 +        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
 +    }
 +    nolock_printk("\n");
 +
 +    // display backward free_list[order]
 +    nolock_printk("- backward : order = %d / n = %d : ", order , n );
 +    LIST_REMOTE_FOREACH_BACKWARD( cxy , &ppm->free_pages_root[order] , iter )
      {
          page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
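Both the local and the remote free paths above keep the same buddy-merge loop, now with fewer comments and the loop updates grouped together. The index arithmetic is compact enough to check standalone: for a free block of order k at index i, the buddy lives at i ^ (1 << k), and when the buddy is also free with the same order the merged block of order k+1 starts at i & buddy_index. The sketch below demonstrates exactly those two expressions; the example indices are arbitrary.

#include <stdio.h>

/* Standalone demonstration of the buddy-merge index arithmetic used by
   ppm_free_pages_nolock() and ppm_remote_free_pages(). */
int main( void )
{
    unsigned current_index = 12;   /* hypothetical block of 4 pages ...    */
    unsigned current_order = 2;    /* ... starting at page index 12        */

    /* the buddy of a block of order k at index i is at index i ^ (1 << k) */
    unsigned buddy_index = current_index ^ (1u << current_order);   /* = 8 */

    /* merge: the combined block of order k+1 starts at the lower index    */
    current_index &= buddy_index;                                   /* = 8 */
    current_order++;                                                /* = 3 */

    printf( "merged block: index %u / order %u (%u pages)\n",
            current_index, current_order, 1u << current_order );
    return 0;
}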
trunk/kernel/mm/ppm.h (r635 → r656)

  /*****************************************************************************************
   * This local allocator must be called by a thread running in local cluster.
 - * It allocates n contiguous physical 4 Kbytes pages from the local cluster, where
 - * n is a power of 2 defined by the <order> argument.
 + * It allocates N contiguous physical 4 Kbytes pages from the local cluster, where
 + * N is a power of 2 defined by the <order> argument.
   * In normal use, it should not be called directly, as the recommended way to allocate
   * physical pages is to call the generic allocator defined in kmem.h.
  ...
  /*****************************************************************************************
   * This remote allocator can be called by any thread running in any cluster.
 - * It allocates n contiguous physical 4 Kbytes pages from cluster identified
 - * by the <cxy> argument, where n is a power of 2 defined by the <order> argument.
 + * It allocates N contiguous physical 4 Kbytes pages from cluster identified
 + * by the <cxy> argument, where N is a power of 2 defined by the <order> argument.
   * In normal use, it should not be called directly, as the recommended way to allocate
   * physical pages is to call the generic allocator defined in kmem.h.
  ...
   * @ cxy   : remote cluster identifier.
   * @ order : ln2( number of 4 Kbytes pages)
 - * @ returns a local pointer on remote page descriptor if success / XPTR_NULL if error.
 - ****************************************************************************************/
 -void * ppm_remote_alloc_pages( cxy_t    cxy,
 + * @ returns an extended pointer on page descriptor if success / XPTR_NULL if error.
 + ****************************************************************************************/
 +xptr_t ppm_remote_alloc_pages( cxy_t    cxy,
                                 uint32_t order );
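Both allocators take the same <order> argument, ln2 of the number of 4 Kbytes pages, so order 0 yields one page and each increment doubles the block. A quick standalone check of what each order yields, assuming a placeholder CONFIG_PPM_MAX_ORDER (the real configuration constant may differ):

#include <stdio.h>

#define CONFIG_PPM_MAX_ORDER 5   /* placeholder value for this example */

int main( void )
{
    /* order = ln2( number of 4 Kbytes pages ) */
    for( unsigned order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ )
    {
        unsigned pages = 1u << order;
        printf( "order %u -> %2u page(s) = %3u Kbytes\n",
                order, pages, pages * 4 );
    }
    return 0;
}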
trunk/kernel/mm/vmm.c (r651 → r656)

  ////////////////////////////////////////////////////////////////////////////////////////////
 -// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions.
 -// Depending on the vseg <type>, it decrements the physical page refcount, and
 -// conditionnally release to the relevant kmem the physical page identified by <ppn>.
 +// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions
 +// to update the physical page descriptor identified by the <ppn> argument.
 +// It decrements the refcount, set the dirty bit when required, and releases the physical
 +// page to kmem depending on the vseg type.
 +// - KERNEL : refcount decremented / not released to kmem / dirty bit not set
 +// - FILE   : refcount decremented / not released to kmem / dirty bit set when required.
 +// - CODE   : refcount decremented / released to kmem / dirty bit not set.
 +// - STAK   : refcount decremented / released to kmem / dirty bit not set.
 +// - DATA   : refcount decremented / released to kmem if ref / dirty bit not set.
 +// - MMAP   : refcount decremented / released to kmem if ref / dirty bit not set.
  ////////////////////////////////////////////////////////////////////////////////////////////
  // @ process : local pointer on process.
  // @ vseg    : local pointer on vseg.
  // @ ppn     : released pysical page index.
 +// @ dirty   : set the dirty bit in page descriptor when non zero.
  ////////////////////////////////////////////////////////////////////////////////////////////
  static void vmm_ppn_release( process_t * process,
                               vseg_t    * vseg,
 -                             ppn_t       ppn )
 +                             ppn_t       ppn,
 +                             uint32_t    dirty )
  {
 -    bool_t do_release;
 +    bool_t do_kmem_release;

      // get vseg type
      vseg_type_t type = vseg->type;

 -    // compute is_ref
 +    // compute is_ref <=> this vseg is the reference vseg
      bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
  ...
      hal_remote_atomic_add( count_xp , -1 );

 -    // compute the do_release condition depending on vseg type
 -    if( (type == VSEG_TYPE_FILE)  ||
 -        (type == VSEG_TYPE_KCODE) ||
 +    // compute the do_kmem_release condition depending on vseg type
 +    if( (type == VSEG_TYPE_KCODE) ||
          (type == VSEG_TYPE_KDATA) ||
          (type == VSEG_TYPE_KDEV) )
      {
 -        // no physical page release for FILE and KERNEL
 -        do_release = false;
 -    }
 +        // no physical page release for KERNEL
 +        do_kmem_release = false;
 +    }
 +    else if( type == VSEG_TYPE_FILE )
 +    {
 +        // no physical page release for KERNEL
 +        do_kmem_release = false;
 +
 +        // set dirty bit if required
 +        if( dirty ) ppm_page_do_dirty( page_xp );
 +    }
      else if( (type == VSEG_TYPE_CODE) ||
               (type == VSEG_TYPE_STACK) )
      {
          // always release physical page for private vsegs
 -        do_release = true;
 +        do_kmem_release = true;
      }
      else if( (type == VSEG_TYPE_ANON) ||
  ...
      {
          // release physical page if reference cluster
 -        do_release = is_ref;
 +        do_kmem_release = is_ref;
      }
      else if( is_ref )   // vseg_type == DATA in reference cluster
  ...
          // release physical page if forks == 0
 -        do_release = (forks == 0);
 +        do_kmem_release = (forks == 0);
      }
      else                // vseg_type == DATA not in reference cluster
      {
          // no physical page release if not in reference cluster
 -        do_release = false;
 +        do_kmem_release = false;
      }

      // release physical page to relevant kmem when required
 -    if( do_release )
 -    {
 -        ppm_remote_free_pages( page_cxy , page_ptr );
 +    if( do_kmem_release )
 +    {
 +        kmem_req_t req;
 +        req.type = KMEM_PPM;
 +        req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );
 +
 +        kmem_remote_free( page_cxy , &req );

  #if DEBUG_VMM_PPN_RELEASE
  ...
      hal_gpt_reset_pte( gpt_xp , vpn );

 -    // release physical page when required
 -    vmm_ppn_release( process , vseg , ppn );
 +    // release physical page depending on vseg type
 +    vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
      }
      }
  ...
      // release physical page when required
 -    vmm_ppn_release( process , vseg , ppn );
 +    vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
      }
      }
  ...
      // release physical page when required
 -    vmm_ppn_release( process , vseg , ppn );
 +    vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
      }
      }
  ...
  // @ vseg : local pointer on vseg.
  // @ vpn  : unmapped vpn.
 -// @ return an extended pointer on the allocated page
 +// @ return an extended pointer on the allocated page descriptor.
  //////////////////////////////////////////////////////////////////////////////////////
  static xptr_t vmm_page_allocate( vseg_t * vseg,
  ...
      xptr_t   page_xp;
      cxy_t    page_cxy;
 -    page_t * page_ptr;
      uint32_t index;
  ...
      assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );

 +    // compute target cluster identifier
      if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
      {
  ...
      // allocate one small physical page from target cluster
 -    page_ptr = ppm_remote_alloc_pages( page_cxy , 0 );
 -
 -    page_xp = XPTR( page_cxy , page_ptr );
 +    kmem_req_t req;
 +    req.type  = KMEM_PPM;
 +    req.order = 0;
 +    req.flags = AF_ZERO;
 +
 +    // get local pointer on page base
 +    void * ptr = kmem_remote_alloc( page_cxy , &req );
 +
 +    // get extended pointer on page descriptor
 +    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );

  #if DEBUG_VMM_PAGE_ALLOCATE
  ...
      uint32_t   cycle = (uint32_t)hal_get_cycles();
      thread_t * this  = CURRENT_THREAD;
 -    if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
 -    printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n",
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
 +    printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id %d / cycle %d\n",
      __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
 +#endif
 +
 +#if (DEBUG_VMM_GET_ONE_PPN & 2)
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
 +    hal_vmm_display( XPTR( local_cxy , this->process ) , true );
  #endif
  ...
  #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
 -    if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
      printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
      __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
  ...
  #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
 -    if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
      printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n",
      __FUNCTION__, this->process->pid, this->trdid, vpn );
  ...
  #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
 -    if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
      printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n",
      __FUNCTION__, this->process->pid, this->trdid, vpn );
  ...
  #if (DEBUG_VMM_GET_ONE_PPN & 0x1)
 -    if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
      printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n"
      "      %d bytes from mapper / %d bytes from BSS\n",
  ...
          }
      }
 -    } // end initialisation for CODE or DATA types
 +
 +    } // end if CODE or DATA types
  }
  ...
  #if DEBUG_VMM_GET_ONE_PPN
 -    cycle = (uint32_t)hal_get_cycles();
 -    if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
      printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
      __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
 +#endif
 +
 +#if (DEBUG_VMM_GET_ONE_PPN & 2)
 +    if( DEBUG_VMM_GET_ONE_PPN < cycle )
 +    hal_vmm_display( XPTR( local_cxy , this->process ) , true );
  #endif
  ...
  #if DEBUG_VMM_HANDLE_PAGE_FAULT
 -    if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
 +    if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) & (vpn > 0) )
      printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
      __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
  #endif

 -#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
 +#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
      if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
 -    hal_vmm_display( this->process , true );
 +    hal_vmm_display( XPTR( local_cxy , this->process ) , true );
  #endif
  ...
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
      uint32_t end_cycle = (uint32_t)hal_get_cycles();
 -    uint32_t cost      = end_cycle - start_cycle;
  #endif
  ...
  #endif

 +#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
 +    if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
 +    hal_vmm_display( XPTR( local_cxy , this->process ) , true );
 +#endif
 +
  #if CONFIG_INSTRUMENTATION_PGFAULTS
 +    uint32_t cost = end_cycle - start_cycle;
      this->info.local_pgfault_nr++;
      this->info.local_pgfault_cost += cost;
  ...
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
      uint32_t end_cycle = (uint32_t)hal_get_cycles();
 -    uint32_t cost      = end_cycle - start_cycle;
  #endif
  ...
  #endif

 +#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
 +    if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
 +    hal_vmm_display( XPTR( local_cxy , this->process ) , true );
 +#endif
 +
  #if CONFIG_INSTRUMENTATION_PGFAULTS
 +    uint32_t cost = end_cycle - start_cycle;
      this->info.false_pgfault_nr++;
      this->info.false_pgfault_cost += cost;
  ...
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
      uint32_t end_cycle = (uint32_t)hal_get_cycles();
 -    uint32_t cost      = end_cycle - start_cycle;
  #endif
  ...
  #endif

 +#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
 +    if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
 +    hal_vmm_display( XPTR( local_cxy , this->process ) , true );
 +#endif
 +
  #if CONFIG_INSTRUMENTATION_PGFAULTS
 +    uint32_t cost = end_cycle - start_cycle;
      this->info.global_pgfault_nr++;
      this->info.global_pgfault_cost += cost;
  ...
  #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
      uint32_t end_cycle = (uint32_t)hal_get_cycles();
 -    uint32_t cost      = end_cycle - start_cycle;
  #endif
  ...
  #if CONFIG_INSTRUMENTATION_PGFAULTS
 +    uint32_t cost = end_cycle - start_cycle;
      this->info.false_pgfault_nr++;
      this->info.false_pgfault_cost += cost;
  ...
  #endif

 -#if ( (DEBUG_VMM_HANDLE_COW & 3) == 3 )
 +#if (DEBUG_VMM_HANDLE_COW & 2)
      hal_vmm_display( XPTR( local_cxy , process ) , true );
  #endif
  ...
  #endif

 -#if ( (DEBUG_VMM_HANDLE_COW & 3) == 3 )
 +#if (DEBUG_VMM_HANDLE_COW & 2)
      hal_vmm_display( XPTR( local_cxy , process ) , true );
  #endif
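The new header comment of vmm_ppn_release() is effectively a decision table: per vseg type, whether the physical page goes back to kmem and whether the dirty bit is propagated. The standalone sketch below encodes that table directly. The type names are reduced to the categories listed in the comment, the forks-counter test of the DATA-in-reference-cluster case is collapsed into a boolean parameter, and all struct and function names are illustrative, not kernel identifiers.

#include <stdbool.h>
#include <stdio.h>

/* Categories from the r656 vmm_ppn_release() header comment */
typedef enum { VSEG_KERNEL, VSEG_FILE, VSEG_CODE, VSEG_STACK,
               VSEG_DATA, VSEG_MMAP } vseg_kind_t;

typedef struct { bool release_to_kmem; bool set_dirty; } release_policy_t;

static release_policy_t ppn_release_policy( vseg_kind_t kind,
                                            bool        is_ref,   /* reference cluster?  */
                                            bool        no_forks, /* forks counter == 0? */
                                            bool        dirty )   /* GPT_DIRTY was set?  */
{
    release_policy_t p = { false, false };

    switch( kind )
    {
        case VSEG_KERNEL: break;                                  /* never released       */
        case VSEG_FILE:   p.set_dirty = dirty;             break; /* kept, may get dirty  */
        case VSEG_CODE:
        case VSEG_STACK:  p.release_to_kmem = true;        break; /* always released      */
        case VSEG_MMAP:   p.release_to_kmem = is_ref;      break; /* released if reference*/
        case VSEG_DATA:   p.release_to_kmem = is_ref && no_forks; break;
    }
    return p;
}

int main( void )
{
    release_policy_t p = ppn_release_policy( VSEG_FILE, true, true, true );
    printf( "FILE page: release=%d dirty=%d\n", p.release_to_kmem, p.set_dirty );
    return 0;
}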
trunk/kernel/mm/vmm.h (r651 → r656)

  /*********************************************************************************************
 - * This function removes from the VMM of a process descriptor identified by the <process>
 - * argument the vseg identified by the <vseg> argument.
 - * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions.
 + * This function removes from the VMM of a local process descriptor, identified by
 + * the <process> argument, the vseg identified by the <vseg> argument.
 + * It is called by the vmm_user_reset(), vmm_global_delete_vseg(), vmm_destroy() functions.
   * It must be called by a local thread, running in the cluster containing the modified VMM.
   * Use the RPC_VMM_REMOVE_VSEG if required.
  ...
   * . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
   * . for STACK the vseg is released to the local stack allocator.
 - * . for all other types, the vseg is released to the local kmem.
 + * . for all other types, the vseg descriptor is released to the local kmem.
   * Regarding the physical pages release:
   * . for KERNEL and FILE, the pages are not released to kmem.
 - * . for CODE and STACK, the pages are released to local kmem when they are not COW.
 + * . for CODE and STACK, the pages are released to local kmem.
   * . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
   *   the local cluster is the reference cluster.