Changeset 624 for trunk/kernel/mm
- Timestamp: Mar 12, 2019, 1:37:38 PM
- Location: trunk/kernel/mm
- Files: 3 edited
Legend: in the diffs below, lines starting with "-" were removed in r624, lines starting with "+" were added, and the remaining lines are unchanged context.
trunk/kernel/mm/mapper.c
(r623 → r624)

@@ -440,7 +440,4 @@
         ppm_page_do_dirty( page_xp );
         hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
-
-        putb(" in mapper_move_user()" , map_ptr , page_count );
-
     }

trunk/kernel/mm/vmm.c
(r623 → r624)

@@ -76,14 +76,12 @@
     vmm_t   * vmm = &process->vmm;

-    // initialize local list of vsegs
+    // initialize VSL (empty)
     vmm->vsegs_nr = 0;
     xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
     remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );

-    assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
-    <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
-
-    assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
-    "no more than 32 threads per cluster for a single process\n");
+    assert( ((CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
+             (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE)) ,
+             "UTILS zone too small\n" );

     assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=

@@ -92,6 +90,5 @@

     // register args vseg in VSL
-    base = (CONFIG_VMM_KENTRY_BASE +
-            CONFIG_VMM_KENTRY_SIZE ) << CONFIG_PPM_PAGE_SHIFT;
+    base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;
     size = CONFIG_VMM_ARGS_SIZE << CONFIG_PPM_PAGE_SHIFT;

@@ -114,7 +111,5 @@

     // register the envs vseg in VSL
-    base = (CONFIG_VMM_KENTRY_BASE +
-            CONFIG_VMM_KENTRY_SIZE +
-            CONFIG_VMM_ARGS_SIZE   ) << CONFIG_PPM_PAGE_SHIFT;
+    base = (CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE) << CONFIG_PPM_PAGE_SHIFT;
     size = CONFIG_VMM_ENVS_SIZE << CONFIG_PPM_PAGE_SHIFT;

@@ -148,5 +143,5 @@
     remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );

-    // update process VMM with kernel vsegs
+    // update process VMM with kernel vsegs as required by the hardware architecture
     error = hal_vmm_kernel_update( process );

@@ -185,54 +180,4 @@
 }  // end vmm_init()

-//////////////////////////////////////
-void vmm_display( process_t * process,
-                  bool_t      mapping )
-{
-    vmm_t * vmm = &process->vmm;
-    gpt_t * gpt = &vmm->gpt;
-
-    printk("\n***** VSL and GPT(%x) for process %x in cluster %x\n\n",
-    process->vmm.gpt.ptr , process->pid , local_cxy );
-
-    // get lock protecting the VSL and the GPT
-    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
-    remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->gpt_lock ) );
-
-    // scan the list of vsegs
-    xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
-    xptr_t   iter_xp;
-    xptr_t   vseg_xp;
-    vseg_t * vseg;
-    XLIST_FOREACH( root_xp , iter_xp )
-    {
-        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
-        vseg    = GET_PTR( vseg_xp );
-
-        printk(" - %s : base = %X / size = %X / npages = %d\n",
-        vseg_type_str( vseg->type ) , vseg->min , vseg->max - vseg->min , vseg->vpn_size );
-
-        if( mapping )
-        {
-            vpn_t    vpn;
-            ppn_t    ppn;
-            uint32_t attr;
-            vpn_t    base = vseg->vpn_base;
-            vpn_t    size = vseg->vpn_size;
-            for( vpn = base ; vpn < (base+size) ; vpn++ )
-            {
-                hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );
-                if( attr & GPT_MAPPED )
-                {
-                    printk("    . vpn = %X / attr = %X / ppn = %X\n", vpn , attr , ppn );
-                }
-            }
-        }
-    }
-
-    // release the locks
-    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );
-    remote_rwlock_rd_release( XPTR( local_cxy , &vmm->gpt_lock ) );
-
-}  // vmm_display()

 //////////////////////////////////////////

@@ -248,4 +193,7 @@
     // update vseg descriptor
     vseg->vmm = vmm;
+
+    // increment vsegs number
+    vmm->vsegs_nr++;

     // add vseg in vmm list

@@ -735,5 +683,4 @@

     // copy base addresses from parent VMM to child VMM
-    child_vmm->kent_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->kent_vpn_base));
     child_vmm->args_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->args_vpn_base));
     child_vmm->envs_vpn_base = (vpn_t)hal_remote_lpt(XPTR(parent_cxy, &parent_vmm->envs_vpn_base));

@@ -773,5 +720,5 @@
 #if (DEBUG_VMM_DESTROY & 1 )
 if( DEBUG_VMM_DESTROY < cycle )
-vmm_display( process , true );
+hal_vmm_display( process , true );
 #endif

@@ -785,7 +732,5 @@
     // (don't use a FOREACH for item deletion in xlist)

-    uint32_t count = 0;
-
-    while( !xlist_is_empty( root_xp ) && (count < 10 ) )
+    while( !xlist_is_empty( root_xp ) )
     {
         // get pointer on first vseg in VSL

@@ -801,6 +746,4 @@
 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
 #endif
-
-        count++;

     }

@@ -1093,4 +1036,5 @@
     // check collisions
     vseg = vmm_check_conflict( process , vpn_base , vpn_size );
+
     if( vseg != NULL )
     {

@@ -1162,5 +1106,5 @@
     xptr_t    lock_xp;     // extended pointer on lock protecting forks counter
     uint32_t  forks;       // actual number of pendinf forks
-    uint32_t  type;        // vseg type
+    uint32_t  vseg_type;   // vseg type

 #if DEBUG_VMM_DELETE_VSEG

@@ -1197,12 +1141,12 @@

     // get relevant vseg infos
-    type     = vseg->type;
-    vpn_min  = vseg->vpn_base;
-    vpn_max  = vpn_min + vseg->vpn_size;
+    vseg_type = vseg->type;
+    vpn_min   = vseg->vpn_base;
+    vpn_max   = vpn_min + vseg->vpn_size;

     // loop to invalidate all vseg PTEs in GPT
     for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
     {
-        // get GPT entry
+        // get ppn and attr from GPT entry
         hal_gpt_get_pte( XPTR( local_cxy , gpt ) , vpn , &attr , &ppn );

@@ -1217,15 +1161,9 @@
             hal_gpt_reset_pte( gpt , vpn );

-            // the allocated page is not released to KMEM for kernel vseg
-            if( ( type != VSEG_TYPE_KCODE) &&
-                ( type != VSEG_TYPE_KDATA) &&
-                ( type != VSEG_TYPE_KDEV ) )
+            // the allocated page is not released to for kernel vseg
+            if( (vseg_type != VSEG_TYPE_KCODE) &&
+                (vseg_type != VSEG_TYPE_KDATA) &&
+                (vseg_type != VSEG_TYPE_KDEV ) )
             {
-
-                // FIXME This code must be completely re-written, as the actual release must depend on
-                // - the vseg type
-                // - the reference cluster
-                // - the page refcount and/or the forks counter
-
                 // get extended pointer on physical page descriptor
                 page_xp  = ppm_ppn2page( ppn );

@@ -1233,4 +1171,7 @@
                 page_ptr = GET_PTR( page_xp );

+                // FIXME This code must be re-written, as the actual release depends on vseg type,
+                // the reference cluster, the page refcount and/or the forks counter...
+
                 // get extended pointers on forks and lock fields
                 forks_xp = XPTR( page_cxy , &page_ptr->forks );

@@ -1245,8 +1186,16 @@
                 if( forks )  // decrement pending forks counter
                 {
+                    // update forks counter
                     hal_remote_atomic_add( forks_xp , -1 );
+
+                    // release the lock protecting the page
+                    remote_busylock_release( lock_xp );
                 }
                 else         // release physical page to relevant cluster
                 {
+                    // release the lock protecting the page
+                    remote_busylock_release( lock_xp );
+
+                    // release the page to kmem
                     if( page_cxy == local_cxy )   // local cluster
                     {

@@ -1266,6 +1215,4 @@
                     }

-                // release the lock protecting the page
-                remote_busylock_release( lock_xp );
                 }
             }
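The most subtle change in vmm_delete_vseg() above is the lock ordering: in r623 the page busylock was released once, after the whole if/else, while in r624 it is released inside each branch, and in particular before the page is handed back to the allocator. The diff never shows the matching lock acquisition or the read of the forks counter (those lines are elided by the viewer), so the small user-space model below assumes they happen just before the test; all types and helpers here are stand-ins, not ALMOS-MKH APIs.

#include <stdio.h>
#include <stdatomic.h>

// Stand-in for a physical page descriptor: only the two fields that matter here.
typedef struct page_s
{
    unsigned    forks;   // pending child forks still referencing this page
    atomic_flag lock;    // models the per-page busylock
} page_t;

static void busylock_acquire( page_t * page )
{
    while( atomic_flag_test_and_set( &page->lock ) ) { /* spin */ }
}

static void busylock_release( page_t * page )
{
    atomic_flag_clear( &page->lock );
}

static void release_to_kmem( page_t * page )
{
    // placeholder for the local / remote kmem release done by the kernel
    printf( "page %p returned to the allocator\n" , (void *)page );
}

// Models the r624 ordering: the forks counter is tested under the lock, then the
// lock is released inside each branch, in particular *before* the page is freed.
static void delete_one_page( page_t * page )
{
    busylock_acquire( page );

    if( page->forks )               // decrement pending forks counter
    {
        page->forks--;              // the kernel uses hal_remote_atomic_add( forks_xp , -1 )
        busylock_release( page );
    }
    else                            // release physical page
    {
        busylock_release( page );   // r623 released the lock only after the free
        release_to_kmem( page );
    }
}

int main( void )
{
    page_t page = { .forks = 1 , .lock = ATOMIC_FLAG_INIT };
    delete_one_page( &page );       // forks: 1 -> 0, page kept
    delete_one_page( &page );       // forks == 0: page is freed
    return 0;
}

Releasing the busylock before the (possibly remote) kmem release keeps the critical section short; the kernel paths elided by the viewer should be checked against the full r624 sources.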
trunk/kernel/mm/vmm.h
(r623 → r624)

@@ -121,10 +121,9 @@
     uint32_t       pgfault_nr;       /*! page fault counter (instrumentation)       */

-    vpn_t          kent_vpn_base;    /*! kentry vseg first page                     */
     vpn_t          args_vpn_base;    /*! args vseg first page                       */
-    vpn_t          envs_vpn_base;    /*! envs zone first page                       */
-    vpn_t          heap_vpn_base;    /*! envs zone first page                       */
-    vpn_t          code_vpn_base;    /*! code zone first page                       */
-    vpn_t          data_vpn_base;    /*! data zone first page                       */
+    vpn_t          envs_vpn_base;    /*! envs vseg first page                       */
+    vpn_t          code_vpn_base;    /*! code vseg first page                       */
+    vpn_t          data_vpn_base;    /*! data vseg first page                       */
+    vpn_t          heap_vpn_base;    /*! heap zone first page                       */

     intptr_t       entry_point;      /*! main thread entry point                    */

@@ -157,5 +156,5 @@
 * @ mapping   : detailed mapping if true.
 ********************************************************************************************/
-void vmm_display( struct process_s * process,
+void hal_vmm_display( struct process_s * process,
                   bool_t             mapping );

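The args_vpn_base and envs_vpn_base fields kept in vmm_t above are filled by vmm_init() using the base computations shown in the vmm.c hunks, which are now anchored on CONFIG_VMM_UTILS_BASE instead of the removed kentry zone. The short standalone sketch below redoes that arithmetic with made-up CONFIG_* values (the real ones live in the kernel configuration and are almost certainly different); it only illustrates the new assert and the two base addresses.

#include <stdio.h>
#include <assert.h>

// Illustrative values only: not the actual ALMOS-MKH configuration.
#define CONFIG_PPM_PAGE_SHIFT  12       // 4 Kbytes pages (assumed)
#define CONFIG_VMM_UTILS_BASE  0x200    // first page of the user UTILS zone (assumed)
#define CONFIG_VMM_ELF_BASE    0x400    // first page of the ELF zone (assumed)
#define CONFIG_VMM_ARGS_SIZE   0x100    // args vseg size in pages (assumed)
#define CONFIG_VMM_ENVS_SIZE   0x100    // envs vseg size in pages (assumed)

int main( void )
{
    // the new check in vmm_init(): args + envs must fit in [UTILS_BASE , ELF_BASE[
    assert( (CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE) <=
            (CONFIG_VMM_ELF_BASE - CONFIG_VMM_UTILS_BASE) );

    // args vseg now starts at the UTILS zone base (the kentry vseg is gone)
    unsigned long args_base = (unsigned long)CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT;

    // envs vseg immediately follows the args vseg
    unsigned long envs_base = ((unsigned long)CONFIG_VMM_UTILS_BASE + CONFIG_VMM_ARGS_SIZE)
                              << CONFIG_PPM_PAGE_SHIFT;

    printf( "args vseg base = 0x%lx\n" , args_base );   // 0x200000 with these values
    printf( "envs vseg base = 0x%lx\n" , envs_base );   // 0x300000 with these values

    return 0;
}

The VPN bases stored in vmm_t are simply these byte addresses divided by the page size (shifted right by CONFIG_PPM_PAGE_SHIFT).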