Changeset 406 for trunk/kernel/mm
Timestamp: Aug 29, 2017, 12:03:37 PM
Location:  trunk/kernel/mm
Files:     8 edited
trunk/kernel/mm/kcm.c (r352 → r406)

      kcm_page_t * kcm_page )
  {
 -    kcm_dmsg("\n[INFO] %s : enters for %s / page %x / count = %d / active = %d\n",
 +    kcm_dmsg("\n[DMSG] %s : enters for %s / page %x / count = %d / active = %d\n",
               __FUNCTION__ , kmem_type_str( kcm->type ) ,
               (intptr_t)kcm_page , kcm_page->count , kcm_page->active );
…
            + (index * kcm->block_size) );

 -    kcm_dmsg("\n[INFO] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n",
 +    kcm_dmsg("\n[DMSG] %s : allocated one block %s / ptr = %p / page = %x / count = %d\n",
               __FUNCTION__ , kmem_type_str( kcm->type ) , ptr ,
               (intptr_t)kcm_page , kcm_page->count );
…
      kcm->blocks_nr  = blocks_nr;

 -    kcm_dmsg("\n[INFO] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
 +    kcm_dmsg("\n[DMSG] %s : KCM %s initialised / block_size = %d / blocks_nr = %d\n",
               __FUNCTION__ , kmem_type_str( type ) , kcm->block_size , kcm->blocks_nr );
  }
…
      kcm_page->active = 1;

 -    kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
 +    kcm_dmsg("\n[DMSG] %s : enters for type %s at cycle %d / new page = %x / count = %d\n",
               __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
               (intptr_t)kcm_page , kcm_page->count );
…
      kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list );

 -    kcm_dmsg("\n[INFO] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
 +    kcm_dmsg("\n[DMSG] %s : enters for type %s at cycle %d / page = %x / count = %d\n",
               __FUNCTION__ , kmem_type_str( kcm->type ) , hal_get_cycles() ,
               (intptr_t)kcm_page , kcm_page->count );
trunk/kernel/mm/kmem.c (r394 → r406)

      assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , __FUNCTION__ , "illegal KCM type" );

 -    kmem_dmsg("\n[INFO] %s : enters / KCM type %s missing in cluster %x\n",
 +    kmem_dmsg("\n[DMSG] %s : enters / KCM type %s missing in cluster %x\n",
               __FUNCTION__ , kmem_type_str( type ) , local_cxy );
…
      hal_fence();

 -    kmem_dmsg("\n[INFO] %s : exit / KCM type %s created in cluster %x\n",
 +    kmem_dmsg("\n[DMSG] %s : exit / KCM type %s created in cluster %x\n",
               __FUNCTION__ , kmem_type_str( type ) , local_cxy );
…
      assert( (type < KMEM_TYPES_NR) , __FUNCTION__ , "illegal KMEM request type" );

 -    kmem_dmsg("\n[INFO] %s : enters in cluster %x for type %s\n",
 +    kmem_dmsg("\n[DMSG] %s : enters in cluster %x for type %s\n",
               __FUNCTION__ , local_cxy , kmem_type_str( type ) );
…
      if( flags & AF_ZERO ) page_zero( (page_t *)ptr );

 -    kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / page = %x / base = %x\n",
 +    kmem_dmsg("\n[DMSG] %s : exit in cluster %x for type %s / page = %x / base = %x\n",
               __FUNCTION__, local_cxy , kmem_type_str( type ) ,
               (intptr_t)ptr , (intptr_t)ppm_page2base( ptr ) );
…
      if( flags & AF_ZERO ) memset( ptr , 0 , size );

 -    kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
 +    kmem_dmsg("\n[DMSG] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
               __FUNCTION__, local_cxy , kmem_type_str( type ) ,
               (intptr_t)ptr , req->size );
…
      if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) );

 -    kmem_dmsg("\n[INFO] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
 +    kmem_dmsg("\n[DMSG] %s : exit in cluster %x for type %s / base = %x / size = %d\n",
               __FUNCTION__, local_cxy , kmem_type_str( type ) ,
               (intptr_t)ptr , kmem_type_size( type ) );
trunk/kernel/mm/mapper.c (r367 → r406)

      error_t    error;

 -    mapper_dmsg("\n[INFO] %s : enters for page %d in mapper %x\n",
 +    mapper_dmsg("\n[DMSG] %s : enters for page %d / mapper %x\n",
                  __FUNCTION__ , index , mapper );
…
      if ( page == NULL )  // missing page => create it and load it from file system
      {
 -        mapper_dmsg("\n[INFO] %s : missing page => load from device\n", __FUNCTION__ );
 +        mapper_dmsg("\n[DMSG] %s : missing page => load from device\n", __FUNCTION__ );

          // allocate one page from PPM
…
          }

 -        // update the mapper and index fields in page descriptor
 -        // required by the vfs_move_page_to_mapper()
 -        page->mapper = mapper;
 -        page->index  = index;
 -
          // launch I/O operation to load page from file system
          error = vfs_mapper_move_page( page,
…
      }

 -    mapper_dmsg("\n[INFO] %s : exit for page %d in mapper %x / page_desc = %x\n",
 +    mapper_dmsg("\n[DMSG] %s : exit for page %d / mapper %x / page_desc = %x\n",
                  __FUNCTION__ , index , mapper , page );
…
      uint8_t * buf_ptr;   // current buffer address

 -    mapper_dmsg("\n[INFO] %s : enters / to_buf = %d / buffer = %x\n",
 +    mapper_dmsg("\n[DMSG] %s : enters / to_buf = %d / buffer = %x\n",
                  __FUNCTION__ , to_buffer , buffer );
…
          else                  page_count = CONFIG_PPM_PAGE_SIZE;

 -        mapper_dmsg("\n[INFO] %s : index = %d / offset = %d / count = %d\n",
 +        mapper_dmsg("\n[DMSG] %s : index = %d / offset = %d / count = %d\n",
                  __FUNCTION__ , index , page_offset , page_count );
…
          buf_ptr = (uint8_t *)buffer + done;

 -        mapper_dmsg("\n[INFO] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",
 +        mapper_dmsg("\n[DMSG] %s : index = %d / buf_ptr = %x / map_ptr = %x\n",
                  __FUNCTION__ , index , buf_ptr , map_ptr );
…
      }

 -    mapper_dmsg("\n[INFO] %s : exit for buffer %x\n",
 +    mapper_dmsg("\n[DMSG] %s : exit for buffer %x\n",
                  __FUNCTION__, buffer );
…
      cxy_t    dst_cxy;   // destination cluster

 -    mapper_dmsg("\n[INFO] %s : enters / to_buf = %d / buffer = %l / size = %x / cycle %d\n",
 -                __FUNCTION__ , to_buffer , buffer_xp , size , hal_time_stamp() );
 +    // get buffer cluster and local pointer
 +    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
 +    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
 +
 +    mapper_dmsg("\n[DMSG] %s : to_buf = %d / buf_cxy = %x / buf_ptr = %x / size = %x\n",
 +                __FUNCTION__ , to_buffer , buffer_cxy , buffer_ptr , size );

      // compute offsets of first and last bytes in file
…
      uint32_t last  = max_byte >> CONFIG_PPM_PAGE_SHIFT;

 -    // get buffer cluster and local pointer
 -    cxy_t     buffer_cxy = GET_CXY( buffer_xp );
 -    uint8_t * buffer_ptr = (uint8_t *)GET_PTR( buffer_xp );
 +    mapper_dmsg("\n[DMSG] %s : first_page = %d / last_page = %d\n",
 +                __FUNCTION__ , first , last );

      // compute source and destination clusters
…
          else                       page_count = CONFIG_PPM_PAGE_SIZE;

 -        mapper_dmsg("\n[INFO] %s : page_index = %d / offset = %d / count = %d\n",
 +        mapper_dmsg("\n[DMSG] %s : page_index = %d / offset = %d / bytes = %d\n",
                  __FUNCTION__ , index , page_offset , page_count );
…
          }

 -        mapper_dmsg("\n[INFO] %s : index = %d\n", __FUNCTION__ , index );
 -
          // move fragment
          hal_remote_memcpy( XPTR( dst_cxy , dst_ptr ), XPTR( src_cxy , src_ptr ), page_count );
…
      }

 -    mapper_dmsg("\n[INFO] %s : exit for buffer %l / size = %x / cycle %d\n",
 -                __FUNCTION__ , buffer_xp , size , hal_time_stamp() );
 +    mapper_dmsg("\n[DMSG] %s : exit / buf_cxy = %x / buf_ptr = %x / size = %x\n",
 +                __FUNCTION__ , buffer_cxy , buffer_ptr , size );

      return 0;

 -}  // end mapper_move_kernel_buffer()
 +}  // end mapper_move_kernel()
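The hunks above change the kernel-buffer argument of mapper_move_kernel() from a cast 64-bit integer to a genuine extended pointer (xptr_t). A minimal hypothetical caller, inferred from the call sites visible in the vmm.c hunks below (the surrounding helper function and its name are illustrative, not part of the changeset):

    // hypothetical helper: read one page from a file mapper into a kernel
    // buffer located in the local cluster
    error_t load_one_page( mapper_t * mapper,   // local pointer on file mapper
                           uint32_t   offset,   // byte offset in the file
                           void     * buffer )  // kernel buffer in local cluster
    {
        // build the extended pointer expected by mapper_move_kernel():
        // local cluster identifier + local pointer
        xptr_t buf_xp = XPTR( local_cxy , buffer );

        // move one page from the mapper to the buffer
        return mapper_move_kernel( mapper,
                                   true,                    // to_buffer
                                   offset,
                                   buf_xp,
                                   CONFIG_PPM_PAGE_SIZE );
    }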
trunk/kernel/mm/ppm.c (r315 → r406)

      page_t * page_ptr = (page_t *)GET_PTR( page_xp );

 -    void * base_ptr = ppm->vaddr_base +
 -                      ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);
 +    void * base_ptr = ppm->vaddr_base +
 +                      ((page_ptr - ppm->pages_tbl)<<CONFIG_PPM_PAGE_SHIFT);

      return XPTR( page_cxy , base_ptr );

(indentation-only change)
…
      assert( (order < CONFIG_PPM_MAX_ORDER) , __FUNCTION__ , "illegal order argument" );

 -    page_t * block = NULL;
 -
 -    ppm_dmsg("\n[INFO] %s : enters / order = %d\n",
 +    page_t * block = NULL;
 +
 +    ppm_dmsg("\n[DMSG] %s : enters / order = %d\n",
               __FUNCTION__ , order );
…
      spinlock_unlock( &ppm->free_lock );

 -    ppm_dmsg("\n[INFO] %s : base = %x / order = %d\n",
 +    ppm_dmsg("\n[DMSG] %s : base = %x / order = %d\n",
               __FUNCTION__ , (uint32_t)ppm_page2base( block ) , order );
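The first hunk above touches the page-descriptor-to-base computation: the index of a page_t descriptor inside the pages_tbl[] array is the physical page number, and shifting that index by the page size gives the byte offset of the page from vaddr_base. A stand-alone sketch of this arithmetic with simplified, assumed types (PAGE_SHIFT, the array size, and the base address are illustrative values, not taken from the kernel configuration):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct page_s { uint32_t flags; } page_t;   // simplified descriptor

    #define PAGE_SHIFT 12                               // assumed 4 Kbytes pages

    int main( void )
    {
        page_t    pages_tbl[1024];                 // hypothetical descriptor array
        uintptr_t vaddr_base = 0x80000000;         // hypothetical mapped window base

        page_t * page_ptr = &pages_tbl[5];         // descriptor of physical page #5

        // descriptor index == page frame number; shift it to get the byte offset
        uintptr_t base = vaddr_base +
                         ((uintptr_t)(page_ptr - pages_tbl) << PAGE_SHIFT);

        printf( "page #5 base = 0x%lx\n" , (unsigned long)base );   // 0x80005000
        return 0;
    }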
trunk/kernel/mm/vmm.c (r401 → r406)

      intptr_t  size;

 -    vmm_dmsg("\n[INFO] %s : enter for process %x\n", __FUNCTION__ , process->pid );
 +    vmm_dmsg("\n[DMSG] %s : enter for process %x\n", __FUNCTION__ , process->pid );

      // get pointer on VMM
…
      vmm->vsegs_nr = 0;
      list_root_init( &vmm->vsegs_root );
 -    error = grdxt_init( &vmm->grdxt,
 -                        CONFIG_VMM_GRDXT_W1,
 -                        CONFIG_VMM_GRDXT_W2,
 -                        CONFIG_VMM_GRDXT_W3 );
 -
 -    assert( (error == 0) , __FUNCTION__ , "cannot initialize radix tree\n" );

      // register kentry vseg in VMM
 -    base = 1 << CONFIG_PPM_PAGE_SHIFT;
 +    base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
      size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
 +
      vseg_kentry = vmm_create_vseg( process , base , size , VSEG_TYPE_CODE );

      assert( (vseg_kentry != NULL) , __FUNCTION__ , "cannot register kentry vseg\n" );

 -    vmm->kent_vpn_base = 1;
 -
 -    // register the args vseg in VMM
 -    base = (CONFIG_VMM_KENTRY_SIZE + 1 ) << CONFIG_PPM_PAGE_SHIFT;
 +    vmm->kent_vpn_base = base;
 +
 +    // register args vseg in VMM
 +    base = (CONFIG_VMM_KENTRY_BASE +
 +            CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT;
      size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;
 +
      vseg_args = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA );

      assert( (vseg_args != NULL) , __FUNCTION__ , "cannot register args vseg\n" );

 -    vmm->args_vpn_base = CONFIG_VMM_KENTRY_SIZE + 1;
 +    vmm->args_vpn_base = base;

      // register the envs vseg in VMM
 -    base = (CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + 1 ) << CONFIG_PPM_PAGE_SHIFT;
 +    base = (CONFIG_VMM_KENTRY_BASE +
 +            CONFIG_VMM_KENTRY_SIZE +
 +            CONFIG_VMM_ARGS_SIZE  ) << CONFIG_PPM_PAGE_SHIFT;
      size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;
 +
      vseg_envs = vmm_create_vseg( process , base , size , VSEG_TYPE_DATA );

      assert( (vseg_envs != NULL) , __FUNCTION__ , "cannot register envs vseg\n" );

 -    vmm->envs_vpn_base = CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + 1;
 +    vmm->envs_vpn_base = base;

      // register the heap vseg in VMM
      base = CONFIG_VMM_HEAP_BASE << CONFIG_PPM_PAGE_SHIFT;
      size = (CONFIG_VMM_MMAP_BASE-CONFIG_VMM_HEAP_BASE) << CONFIG_PPM_PAGE_SHIFT;
 +
      vseg_heap = vmm_create_vseg( process , base , size , VSEG_TYPE_HEAP );

      assert( (vseg_heap != NULL) , __FUNCTION__ , "cannot register heap vseg\n" );

 -    vmm->heap_vpn_base = CONFIG_VMM_HEAP_BASE;
 +    vmm->heap_vpn_base = base;

      // initialize generic page table
…
      hal_fence();

 -    vmm_dmsg("\n[INFO] %s : exit for process %x\n", __FUNCTION__ , process->pid );
 +    vmm_dmsg("\n[DMSG] %s : exit for process %x / entry_point = %x\n",
 +             __FUNCTION__ , process->pid , process->vmm.entry_point );

  }  // end vmm_init()
…
      dst_vmm->vsegs_nr = 0;
      list_root_init( &dst_vmm->vsegs_root );
 -    error = grdxt_init( &dst_vmm->grdxt,
 -                        CONFIG_VMM_GRDXT_W1,
 -                        CONFIG_VMM_GRDXT_W2,
 -                        CONFIG_VMM_GRDXT_W3 );
 -    if( error )
 -    {
 -        printk("\n[ERROR] in %s : cannot initialize radix tree for process %x\n",
 -               __FUNCTION__ , dst_process->pid );
 -        return ENOMEM;
 -    }

      // loop on src_vmm list of vsegs to create
…
          vseg_free( vseg );
      }
 -
 -    // delete vsegs radix_tree
 -    grdxt_destroy( &vmm->grdxt );

      // release lock
…
      vmm_t * vmm = &process->vmm;

 -    vmm_dmsg("\n[INFO] %s : enter for process %x / base = %x / size = %x / type = %s\n",
 +    vmm_dmsg("\n[DMSG] %s : enter for process %x / base = %x / size = %x / type = %s\n",
               __FUNCTION__ , process->pid , base , size , vseg_type_str(type) );
…
      // update "heap_vseg" in VMM
 -    process->vmm.heap_vseg = vseg;
 +    if( type == VSEG_TYPE_HEAP ) process->vmm.heap_vseg = vseg;

      // attach vseg to vmm
…
      rwlock_wr_unlock( &vmm->vsegs_lock );

 -    vmm_dmsg("\n[INFO] %s : exit for process %x / vseg [%x, %x] has been mapped\n",
 +    vmm_dmsg("\n[DMSG] %s : exit for process %x / vseg [%x, %x] registered\n",
               __FUNCTION__ , process->pid , vseg->min , vseg->max );

      return vseg;
 -}
 +
 +}  // vmm_create_vseg()
…
  }

 +///////////////////////////////////////////////////////////////////////////////////////
 +// This low-level static function is called by the vmm_get_vseg() and vmm_resize_vseg()
 +// functions. It scans the list of registered vsegs to find the unique vseg containing
 +// a given virtual address.
 +///////////////////////////////////////////////////////////////////////////////////////
 +// @ vmm    : pointer on the process VMM.
 +// @ vaddr  : virtual address.
 +// @ return vseg pointer if success / return NULL if not found.
 +///////////////////////////////////////////////////////////////////////////////////////
 +static vseg_t * vseg_from_vaddr( vmm_t    * vmm,
 +                                 intptr_t   vaddr )
 +{
 +    list_entry_t * iter;
 +    vseg_t       * vseg = NULL;
 +
 +    // get lock protecting the vseg list
 +    rwlock_rd_lock( &vmm->vsegs_lock );
 +
 +    // scan the list of vsegs
 +    LIST_FOREACH( &vmm->vsegs_root , iter )
 +    {
 +        vseg = LIST_ELEMENT( iter , vseg_t , list );
 +        if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) break;
 +    }
 +
 +    // release the lock
 +    rwlock_rd_unlock( &vmm->vsegs_lock );
 +
 +    return vseg;
 +}
 +
  /////////////////////////////////////////////
  error_t vmm_resize_vseg( process_t * process,
                           intptr_t    base,
                           intptr_t    size )
  {
 -    error_t error;
 +    error_t   error;
 +    vseg_t  * new;
 +    vpn_t     vpn_min;
 +    vpn_t     vpn_max;

      // get pointer on process VMM
…
      intptr_t addr_min = base;
      intptr_t addr_max = base + size;
 -    uint32_t shift    = CONFIG_PPM_PAGE_SHIFT;

      // get pointer on vseg
 -    vseg_t * vseg = grdxt_lookup( &vmm->grdxt , (uint32_t)(base >> shift) );
 +    vseg_t * vseg = vseg_from_vaddr( vmm , base );

      if( vseg == NULL) return EINVAL;
…
          error = 0;
      }
 -    else if( vseg->min == addr_min )  // vseg must be resized
 -    {
 -        panic("resize not implemented yet");
 -        error = 0;
 -    }
 -    else if( vseg->max == addr_max )  // vseg must be resized
 -    {
 -        panic("resize not implemented yet");
 -        error = 0;
 -    }
 -    else  // vseg cut in three regions => vseg must be resized & new vseg created
 -    {
 -        panic("resize not implemented yet");
 -        error = 0;
 +    else if( vseg->min == addr_min )  // vseg must be resized
 +    {
 +        // update vseg base address
 +        vseg->min = addr_max;
 +
 +        // update vpn_base and vpn_size
 +        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
 +        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
 +        vseg->vpn_base = vpn_min;
 +        vseg->vpn_size = vpn_max - vpn_min + 1;
 +        error = 0;
 +    }
 +    else if( vseg->max == addr_max )  // vseg must be resized
 +    {
 +        // update vseg max address
 +        vseg->max = addr_min;
 +
 +        // update vpn_base and vpn_size
 +        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
 +        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
 +        vseg->vpn_base = vpn_min;
 +        vseg->vpn_size = vpn_max - vpn_min + 1;
 +        error = 0;
 +    }
 +    else  // vseg cut in three regions
 +    {
 +        // resize existing vseg
 +        vseg->max = addr_min;
 +
 +        // update vpn_base and vpn_size
 +        vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
 +        vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
 +        vseg->vpn_base = vpn_min;
 +        vseg->vpn_size = vpn_max - vpn_min + 1;
 +
 +        // create new vseg
 +        new = vmm_create_vseg( process , addr_min , (vseg->max - addr_max) , vseg->type );
 +        if( new == NULL ) error = EINVAL;
 +        else              error = 0;
      }
…
      return error;
 -}
 +
 +}  // vmm_resize_vseg()
…
  error_t vmm_get_vseg( process_t * process,
                        intptr_t    vaddr,
                        vseg_t   ** found_vseg )
  {
 -    vmm_t  * vmm;
 -    vseg_t * vseg;
 -
 -    // get pointer on process VMM
 -    vmm = &process->vmm;
 -
 -    // get lock protecting the vseg list
 -    rwlock_rd_lock( &vmm->vsegs_lock );
 -
 -    // get pointer on vseg from local radix tree
 -    vseg = grdxt_lookup( &vmm->grdxt, (uint32_t)(vaddr >> CONFIG_PPM_PAGE_SHIFT) );
 -
 -    // release the lock
 -    rwlock_rd_unlock( &vmm->vsegs_lock );
 +    vmm_t * vmm = &process->vmm;
 +
 +    // get vseg from vaddr
 +    vseg_t * vseg = vseg_from_vaddr( vmm , vaddr );

      if( vseg == NULL )  // vseg not found in local cluster => try to get it from ref
…
          xptr_t   vseg_xp;
          error_t  error;
 +
          rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
…
          vseg = vseg_alloc();

 -        if( vseg == NULL ) panic("no memory for vseg copy in cluster %x", local_cxy );
 +        if( vseg == NULL ) return -1;

          // initialise local vseg from reference
…
          // register local vseg in local VMM
 -        error = vseg_attach( &process->vmm , vseg );
 -
 -        if( error ) panic("no memory for vseg registration in cluster %x", local_cxy );
 +        vseg_attach( &process->vmm , vseg );
      }
…
      cxy_t    page_cxy;   // physical page cluster
      page_t * page_ptr;   // local pointer on physical page descriptor
 -
 -    uint32_t   type      = vseg->type;
 -    xptr_t     mapper_xp = vseg->mapper_xp;
 -    uint32_t   flags     = vseg->flags;
 -
 -    // get mapper cluster and local pointer
 -    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
 -    mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
 -
 -    // FILE type : simply get the physical page from the file mapper
 +    uint32_t index;      // missing page index in vseg mapper
 +    uint32_t type;       // vseg type
 +
 +    type  = vseg->type;
 +    index = vpn - vseg->vpn_base;
 +
 +    vmm_dmsg("\n[DMSG] %s : core[%x,%d] enter for vpn = %x / type = %s / index = %d\n",
 +             __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, vseg_type_str(type), index );
 +
 +    // FILE type : get the physical page from the file mapper
      if( type == VSEG_TYPE_FILE )
      {
 -        // compute index in file mapper
 -        uint32_t index = vpn - vseg->vpn_base;
 +        // get extended pointer on mapper
 +        xptr_t mapper_xp = vseg->mapper_xp;
 +
 +        assert( (mapper_xp != XPTR_NULL), __FUNCTION__,
 +                "mapper not defined for a FILE vseg\n" );
 +
 +        // get mapper cluster and local pointer
 +        cxy_t      mapper_cxy = GET_CXY( mapper_xp );
 +        mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );

          // get page descriptor from mapper
…
      }

 -    // all other types : allocate a physical page from target cluster,
 +    // Other types : allocate a physical page from target cluster,
      else
      {
 +        uint32_t flags = vseg->flags;
 +
          // get target cluster for physical page
          if( flags & VSEG_DISTRIB ) // depends on VPN LSB
          {
 -            uint32_t x_width = LOCAL_CLUSTER->x_width;
 -            uint32_t y_width = LOCAL_CLUSTER->y_width;
 -            page_cxy = vpn & ((1<<(x_width + y_width)) - 1);
 +            uint32_t x_size = LOCAL_CLUSTER->x_size;
 +            uint32_t y_size = LOCAL_CLUSTER->y_size;
 +            page_cxy = vpn & ((x_size * y_size) - 1);
          }
          else                       // defined in vseg descriptor
…
          // allocate a physical page in target cluster
          kmem_req_t req;
 -        if( page_cxy == local_cxy )
 +        if( page_cxy == local_cxy )    // target cluster is the local cluster
          {
              req.type = KMEM_PAGE;
…
          if( page_ptr == NULL ) return ENOMEM;

 -        // initialise page from .elf file mapper for DATA and CODE types
 +        // initialise missing page from .elf file mapper for DATA and CODE types
 +        // => the mapper_xp field is an extended pointer on the .elf file mapper
          if( (type == VSEG_TYPE_CODE) || (type == VSEG_TYPE_DATA) )
          {
 -            // compute missing page index in vseg
 -            vpn_t page_index = vpn - vseg->vpn_base;
 +            // get extended pointer on mapper
 +            xptr_t mapper_xp = vseg->mapper_xp;
 +
 +            assert( (mapper_xp != XPTR_NULL), __FUNCTION__,
 +                    "mapper not defined for a CODE or DATA vseg\n" );
 +
 +            // get mapper cluster and local pointer
 +            cxy_t      mapper_cxy = GET_CXY( mapper_xp );
 +            mapper_t * mapper_ptr = (mapper_t *)GET_PTR( mapper_xp );
 +
 +            // compute missing page offset in vseg
 +            uint32_t offset = index << CONFIG_PPM_PAGE_SHIFT;

              // compute missing page offset in .elf file
 -            intptr_t page_offset = vseg->file_offset +
 -                                   (page_index << CONFIG_PPM_PAGE_SHIFT);
 -
 -            // compute extended pointer on page first byte
 -            xptr_t base_xp = ppm_page2base( XPTR( page_cxy , page_ptr ) );
 -
 -            // file_size can be smaller than vseg_size for BSS
 -            intptr_t file_size = vseg->file_size;
 -
 -            if( file_size < page_offset )  // fully in BSS
 +            uint32_t elf_offset = vseg->file_offset + offset;
 +
 +            vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / elf_offset = %x\n",
 +                     __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn, elf_offset );
 +
 +            // compute extended pointer on page base
 +            xptr_t base_xp = ppm_page2base( XPTR( page_cxy , page_ptr ) );
 +
 +            // file_size (in .elf mapper) can be smaller than vseg_size (BSS)
 +            uint32_t file_size = vseg->file_size;
 +
 +            if( file_size < offset )   // missing page fully in BSS
              {
 +                vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / fully in BSS\n",
 +                         __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
 +
                  if( page_cxy == local_cxy )
                  {
…
                  }
              }
 -            else if( file_size >= (page_offset + CONFIG_PPM_PAGE_SIZE) )  // fully in mapper
 +            else if( file_size >= (offset + CONFIG_PPM_PAGE_SIZE) )  // fully in mapper
              {
 +                vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / fully in mapper\n",
 +                         __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn );
 +
                  if( mapper_cxy == local_cxy )
                  {
                      error = mapper_move_kernel( mapper_ptr,
                                                  true,             // to_buffer
 -                                                page_offset,
 +                                                elf_offset,
                                                  base_xp,
                                                  CONFIG_PPM_PAGE_SIZE );
…
                                                   true,          // to buffer
                                                   false,         // kernel buffer
 -                                                 page_offset,
 -                                                 (uint64_t)base_xp,
 +                                                 elf_offset,
 +                                                 base_xp,
                                                   CONFIG_PPM_PAGE_SIZE,
                                                   &error );
…
                  if( error ) return EINVAL;
              }
 -            else  // in mapper : from page_offset -> (file_size - page_offset)
 -                  // in BSS    : from file_size   -> (page_offset + page_size)
 +            else  // both in mapper and in BSS :
 +                  // - (file_size - offset) bytes from mapper
 +                  // - (page_size + offset - file_size) bytes from BSS
              {
 +                vmm_dmsg("\n[DMSG] %s : core[%x,%d] for vpn = %x / both mapper & BSS\n"
 +                         "      %d bytes from mapper / %d bytes from BSS\n",
 +                         __FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, vpn,
 +                         file_size - offset , offset + CONFIG_PPM_PAGE_SIZE - file_size );
 +
                  // initialize mapper part
                  if( mapper_cxy == local_cxy )
                  {
                      error = mapper_move_kernel( mapper_ptr,
                                                  true,         // to buffer
 -                                                page_offset,
 +                                                elf_offset,
                                                  base_xp,
 -                                                file_size - page_offset );
 +                                                file_size - offset );
                  }
                  else
                  {
…
                                                   true,         // to buffer
                                                   false,        // kernel buffer
 -                                                 page_offset,
 -                                                 (uint64_t)base_xp,
 -                                                 file_size - page_offset,
 +                                                 elf_offset,
 +                                                 base_xp,
 +                                                 file_size - offset,
                                                   &error );
                  }
…
                  if( page_cxy == local_cxy )
                  {
 -                    memset( GET_PTR( base_xp ) + file_size - page_offset , 0 ,
 -                            page_offset + CONFIG_PPM_PAGE_SIZE - file_size );
 +                    memset( GET_PTR( base_xp ) + file_size - offset , 0 ,
 +                            offset + CONFIG_PPM_PAGE_SIZE - file_size );
                  }
                  else
                  {
 -                    hal_remote_memset( base_xp + file_size - page_offset , 0 ,
 -                                       page_offset + CONFIG_PPM_PAGE_SIZE - file_size );
 +                    hal_remote_memset( base_xp + file_size - offset , 0 ,
 +                                       offset + CONFIG_PPM_PAGE_SIZE - file_size );
                  }
              }
…
      // return ppn
      *ppn = ppm_page2ppn( XPTR( page_cxy , page_ptr ) );
 +
 +    vmm_dmsg("\n[DMSG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n",
 +             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , *ppn );
 +
      return 0;
…
      // this function must be called by a thread running in the reference cluster
      assert( (GET_CXY( process->ref_xp ) == local_cxy ) , __FUNCTION__ ,
 -            " not called in the reference cluster\n" );
 +            "not called in the reference cluster\n" );
 +
 +    vmm_dmsg("\n[DMSG] %s : core[%x,%d] enter for vpn = %x in process %x\n",
 +             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , process->pid );

      // get VMM pointer
…
      if( (attr & GPT_MAPPED) == 0 )
      {
 +        vmm_dmsg("\n[DMSG] %s : core[%x,%d] page %x unmapped => try to map it\n",
 +                 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn );
 +
          // 1. get vseg pointer
          error = vmm_get_vseg( process , vpn<<CONFIG_PPM_PAGE_SHIFT , &vseg );
…
              return error;
          }
 +
 +        vmm_dmsg("\n[DMSG] %s : core[%x,%d] found vseg %s / vpn_base = %x / vpn_size = %x\n",
 +                 __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid ,
 +                 vseg_type_str(vseg->type) , vseg->vpn_base , vseg->vpn_size );

          // 2. get physical page number, depending on vseg type
…
      } // end new PTE

 +    vmm_dmsg("\n[DMSG] %s : core[%x,%d] exit for vpn = %x / ppn = %x\n",
 +             __FUNCTION__ , local_cxy , CURRENT_THREAD->core->lid , vpn , ppn );
 +
      *ret_ppn  = ppn;
      *ret_attr = attr;
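One detail worth noting in the vmm_get_one_ppn() hunks above: the target cluster for a distributed vseg is now computed as vpn & ((x_size * y_size) - 1) instead of a mask built from x_width and y_width. This AND behaves as a modulo only when x_size * y_size is a power of two, which the changeset appears to assume. A stand-alone sketch (the 4x4 mesh is an assumed configuration, not part of the changeset) showing successive VPNs spreading round-robin over the clusters:

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
        uint32_t x_size = 4;   // assumed mesh width
        uint32_t y_size = 4;   // assumed mesh height

        // successive pages of a distributed vseg land on successive clusters
        for( uint32_t vpn = 14 ; vpn < 20 ; vpn++ )
        {
            uint32_t page_cxy = vpn & ((x_size * y_size) - 1);
            printf( "vpn = %u -> cluster %u\n" , vpn , page_cxy );
        }
        return 0;   // prints clusters 14, 15, 0, 1, 2, 3
    }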
trunk/kernel/mm/vmm.h (r401 → r406)

  #include <bits.h>
  #include <list.h>
 -#include <grdxt.h>
  #include <spinlock.h>
  #include <hal_gpt.h>
…
  * This structure defines the Virtual Memory Manager for a given process in a given cluster.
  * This local VMM provides three main services:
 -* 1) It registers all vsegs statically or dynamically defined in the vseg list,
 -*    and in the associated radix-tree.
 -* 2) It allocates virtual memory space for the STACKS and MMAP vsegs,
 -*    using dedicated allocators.
 +* 1) It registers all vsegs statically or dynamically defined in the vseg list.
 +* 2) It allocates virtual memory space for the STACKS and MMAP vsegs.
  * 3) It contains the local copy of the generic page table descriptor.
…
      list_entry_t   vsegs_root;   /*! all vsegs in same process and same cluster      */
      uint32_t       vsegs_nr;     /*! total number of local vsegs                     */
 -    grdxt_t        grdxt;        /*! embedded generic vsegs radix tree (key is vpn)  */

      gpt_t          gpt;          /*! embedded generic page table descriptor          */
…
  /*********************************************************************************************
 - * This function initialises the virtual memory manager attached to a process.
 - * - It initializes the VSL (list of vsegs and associated radix tree).
 - * - It initializes the generic page table (empty).
 + * This function initialises the virtual memory manager attached to an user process.
 + * - It registers the "kentry", "args", "envs" and "heap" vsegs in the vsegs list.
 + *   The "code" and "data" vsegs are registered by the elf_load_process() function,
 + *   the "stack" vsegs are registered by the thread_user_create() function, and the
 + *   "mmap" vsegs are dynamically created by syscalls.
 + * - It initializes the generic page table, calling the HAL specific hal_gpt_init() function.
 + *   For TSAR it maps all pages of the "kentry" vseg, which must be identity mapped.
  * - It initializes the STACK and MMAP allocators.
 - * - It registers the "kentry", "args", "envs" and "heap" vsegs in the vsegs list.
 - *   Any error in this function gives a kernel panic.
 + * TODO : Any error in this function gives a kernel panic => improve error handling.
  *********************************************************************************************
  * @ process   : pointer on process descriptor
…
  * (a) if the region is not entirely mapped in an existing vseg, it's an error.
  * (b) if the region has same base and size as an existing vseg, the vseg is removed.
 -* (c) if the removed region cuts the vseg in two parts, it is removed and re-created.
 -* (d) if the removed region cuts the vseg in three parts, it is removed, and two are created.
 -* TODO : cases (c) and (d) are not implemented [AG]
 +* (c) if the removed region cuts the vseg in two parts, it is modified.
 +* (d) if the removed region cuts the vseg in three parts, it is modified, and a new
 +*     vseg is created with the same type.
…
  * - if the vseg is missing in local VMM, it uses a RPC to get it from the reference cluster,
  *   registers it in local VMM and returns the local vseg pointer, if success.
 -* - if the vseg is missing in reference VMM, it returns an user error.
 -*   It creates a kernel panic if there is not enough memory to create a new vseg descriptor
 -*   in the cluster containing the calling thread.
 +* - it returns an user error if the vseg is missing in the reference VMM, or if there is
 +*   not enough memory for a new vseg descriptor in the cluster containing the calling thread.
…
  * Depending on the vseg type, defined by the <vseg> argument, it returns the PPN
  * (Physical Page Number) associated to a missing page defined by the <vpn> argument.
 -* - For the VSEG_TYPE_FILE type, it returns the physical page from the file mapper.
 -*   For all other types, it allocates a new physical page from the cluster defined
 -*   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg.
 -* - For the VSEG_TYPE_CODE and VSEG_TYPE_DATA types, the allocated page is initialized
 -*   from the .elf file mapper. For other vseg types it is not initialised.
 +* - For the FILE type, it returns directly the physical page from the file mapper.
 +* - For the CODE and DATA types, it allocates a new physical page from the cluster defined
 +*   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
 +*   and initializes this page from the .elf file mapper.
 +* - For all other types, it allocates a new physical page from the cluster defined
 +*   by the <vseg->cxy> field, or by the <vpn> MSB bits for a distributed vseg,
 +*   but the new page is not initialized.
…
  * @ vseg   : local pointer on vseg containing the missing page.
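The removal cases (a)-(d) documented above reduce to plain interval arithmetic on the vseg [min, max) range (the half-open convention matches the test in vseg_from_vaddr() in the vmm.c diff). A stand-alone sketch with a simplified vseg type (mini_vseg_t and removal_case() are illustrative names, not part of the changeset):

    #include <stdint.h>
    #include <stdio.h>

    typedef struct { intptr_t min, max; } mini_vseg_t;   // simplified vseg

    // return the case letter of the vmm_remove_vseg() specification
    static char removal_case( mini_vseg_t * v , intptr_t base , intptr_t size )
    {
        intptr_t addr_min = base;
        intptr_t addr_max = base + size;

        if( (addr_min <  v->min) || (addr_max >  v->max) ) return 'a'; // not fully mapped
        if( (addr_min == v->min) && (addr_max == v->max) ) return 'b'; // whole vseg removed
        if( (addr_min == v->min) || (addr_max == v->max) ) return 'c'; // one end cut off
        return 'd';                                                    // middle cut out
    }

    int main( void )
    {
        mini_vseg_t v = { 0x10000 , 0x20000 };
        printf( "%c\n" , removal_case( &v , 0x10000 , 0x10000 ) );  // b : exact match
        printf( "%c\n" , removal_case( &v , 0x10000 , 0x04000 ) );  // c : head removed
        printf( "%c\n" , removal_case( &v , 0x14000 , 0x04000 ) );  // d : middle removed
        return 0;
    }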
trunk/kernel/mm/vseg.c (r394 → r406)

  ///////////////////////////////
 -error_t vseg_attach( vmm_t  * vmm,
 -                     vseg_t * vseg )
 -{
 -    // add vseg in radix-tree
 -    error_t error = grdxt_insert( &vmm->grdxt , vseg->vpn_base , vseg );
 -    if ( error ) return ENOMEM;
 -
 +void vseg_attach( vmm_t  * vmm,
 +                  vseg_t * vseg )
 +{
      // update vseg descriptor
      vseg->vmm = vmm;
…
      // add vseg in vmm list
      list_add_last( &vmm->vsegs_root , &vseg->list );
 -
 -    return 0;
  }
…
                   vseg_t * vseg )
  {
 -    // remove vseg from radix-tree
 -    grdxt_remove( &vmm->grdxt , vseg->vpn_base );
 -
      // update vseg descriptor
      vseg->vmm = NULL;
trunk/kernel/mm/vseg.h (r388 → r406)

  * @ vmm   : pointer on the VMM
  * @ vseg  : pointer on the vseg descriptor
 -* @ returns 0 if success / returns ENOMEM if registration in GRDXT impossible.
  *********************************************************************************************/
 -error_t vseg_attach( struct vmm_s * vmm,
 -                     vseg_t       * vseg );
 +void vseg_attach( struct vmm_s * vmm,
 +                  vseg_t       * vseg );

  /**********************************************************************************************