- Timestamp: Jul 11, 2017, 12:57:27 PM (7 years ago)
- File: 1 edited
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with +
- Removed: lines prefixed with -
trunk/kernel/mm/vmm.c
Differences between r124 and r178:

In vmm_copy(), the spelling "initialise" becomes "initialize" in three comments and one error message:

-    // initialise dst_vmm vsegs_lock
+    // initialize dst_vmm vsegs_lock

-    // initialise the dst_vmm vsegs list and the radix tree
+    // initialize the dst_vmm vsegs list and the radix tree

-        printk("\n[ERROR] in %s : cannot initialise radix tree for process %x\n",
+        printk("\n[ERROR] in %s : cannot initialize radix tree for process %x\n",
                __FUNCTION__ , dst_process->pid );

-    // initialise instrumentation counters
+    // initialize instrumentation counters

The trailing function-name comments on closing braces are removed, for example at the end of vmm_init():

     hal_fence();
-
-} // end vmm_init()
+}

The same change applies at the end of vmm_copy(), vmm_destroy(), vmm_check_conflict(), vmm_stack_alloc(), vmm_mmap_alloc(), vmm_create_vseg(), vmm_remove_vseg(), vmm_map_vseg(), vmm_unmap_vseg(), vmm_resize_vseg(), vmm_get_vseg(), vmm_get_pte(), vmm_handle_page_fault(), and vmm_v2p_translate(); blank lines immediately before those closing braces are removed as well.

All other modified lines only strip trailing whitespace or realign continuation lines, without changing the code itself: the dst_vseg NULL test and the base-address copies (kent / args / envs / heap / code / data vpn_base) in vmm_copy(), the reference-cluster comment in vmm_get_pte(), and many lines in the commented-out legacy code at the end of the file (vmm_migrate_shared_page_seq(), vmm_do_migrate(), vmm_do_cow(), vmm_do_aod() and their debug printk() / vmm_dmsg() calls).