Timestamp: Aug 16, 2017, 2:33:17 PM (7 years ago)
Location: trunk/kernel/mm
Files: 3 edited
Legend: lines prefixed with "-" were removed, lines prefixed with "+" were added; unprefixed lines are unchanged context.
trunk/kernel/mm/vmm.c
r385 → r388

vmm_get_vseg() (old lines 724-741, new lines 724-779):

 ///////////////////////////////////////////
-vseg_t * vmm_get_vseg( process_t * process,
-                       intptr_t    vaddr )
-{
+error_t vmm_get_vseg( process_t * process,
+                      intptr_t    vaddr,
+                      vseg_t   ** found_vseg )
+{
+    vmm_t  * vmm;
+    vseg_t * vseg;
+
     // get pointer on process VMM
-    vmm_t * vmm = &process->vmm;
+    vmm = &process->vmm;

     // get lock protecting the vseg list
     rwlock_rd_lock( &vmm->vsegs_lock );

-    // get pointer on vseg from radix tree
-    vseg_t * vseg = grdxt_lookup( &vmm->grdxt, (uint32_t)(vaddr >> CONFIG_PPM_PAGE_SHIFT) );
+    // get pointer on vseg from local radix tree
+    vseg = grdxt_lookup( &vmm->grdxt, (uint32_t)(vaddr >> CONFIG_PPM_PAGE_SHIFT) );

     // release the lock
     rwlock_rd_unlock( &vmm->vsegs_lock );

-    return vseg;
-}
+    if( vseg == NULL )  // vseg not found in local cluster => try to get it from ref
+    {
+        // get extended pointer on reference process
+        xptr_t ref_xp = process->ref_xp;
+
+        // get cluster and local pointer on reference process
+        cxy_t       ref_cxy = GET_CXY( ref_xp );
+        process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );
+
+        if( local_cxy == ref_cxy ) return -1;  // local cluster is the reference
+
+        // get extended pointer on reference vseg
+        xptr_t  vseg_xp;
+        error_t error;
+        rpc_vmm_get_ref_vseg_client( ref_cxy , ref_ptr , bad_vaddr , &vseg_xp , &error );
+
+        if( error ) return -1;  // vseg not found => illegal user vaddr
+
+        // allocate a vseg in local cluster
+        vseg = vseg_alloc();
+
+        if( vseg == NULL ) panic("no memory for vseg copy in cluster %x", local_cxy );
+
+        // initialise local vseg from reference
+        vseg_init_from_ref( vseg , vseg_xp );
+
+        // register local vseg in local VMM
+        error = vseg_attach( &process->vmm , vseg );
+
+        if( error ) panic("no memory for vseg registration in cluster %x", local_cxy );
+    }
+
+    // success
+    *found_vseg = vseg;
+    return 0;
+
+}  // end vmm_get_vseg()

vmm_handle_page_fault() (old lines 1004-1012, new line 1042):

-    // check page allocation error
-    if( error )
-    {
-        printk("\n[ERROR] in %s : cannot allocate memory / process = %x / vpn = %x\n",
-               __FUNCTION__ , process->pid , vpn );
-        return ENOMEM;
-    }
-
-    return 0;
+    return error;

 }  // end vmm_handle_page_fault()

Deleted (old lines 1058-1629): the whole block following vmm_v2p_translate() that was already
commented out as "/* deprecated ... */". It contained the former page-fault and page-migration
handlers: vmm_inval_shared_page(), vmm_update_shared_page(), vmm_migrate_shared_page_seq(),
vmm_do_migrate(), vmm_do_cow(), vmm_do_mapped(), vmm_do_aod(), and the
VSEGION_PAGE_FAULT(vmm_default_pagefault) handler.
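Side note on the API change above: callers can no longer test a NULL return to detect an illegal address; they must check the returned error_t and read the vseg through the new out argument. A minimal, hypothetical caller sketch follows (the helper name check_user_vaddr() is illustrative and not part of this changeset):

    // Hypothetical illustration of the new calling pattern (not in this changeset).
    static error_t check_user_vaddr( process_t * process , intptr_t vaddr )
    {
        vseg_t * vseg;

        // r385 style:  vseg = vmm_get_vseg( process , vaddr );
        //              if( vseg == NULL ) { /* illegal user vaddr */ }

        // r388 style: the vseg is returned through the third argument,
        // and -1 covers both "missing in reference VMM" and RPC failure
        error_t error = vmm_get_vseg( process , vaddr , &vseg );
        if( error ) return -1;

        // on success the vseg is now also registered in the local VMM
        return 0;
    }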
trunk/kernel/mm/vmm.h
r313 → r388

vmm_get_vseg() prototype (old lines 264-273, new lines 264-279):

 /*********************************************************************************************
- * This function searches if a given virtual address is contained in a vseg registered in
- * the local process VMM and returns the vseg pointer if success.
+ * This function checks that a given virtual address is contained in a registered vseg.
+ * - if the vseg is registered in the local process VMM, it returns the local vseg pointer.
+ * - if the vseg is missing in the local VMM, it uses an RPC to get it from the reference
+ *   cluster, registers it in the local VMM, and returns the local vseg pointer on success.
+ * - if the vseg is also missing in the reference VMM, it returns a user error.
+ * It triggers a kernel panic if there is not enough memory to create a new vseg descriptor
+ * in the cluster containing the calling thread.
 *********************************************************************************************
- * @ process   : pointer on process descriptor
- * @ vaddr     : virtual address
- * @ returns the pointer on vseg if success / returns NULL if failure.
+ * @ process   : [in]  pointer on process descriptor
+ * @ vaddr     : [in]  virtual address
+ * @ vseg      : [out] pointer on found vseg
+ * @ returns 0 if success / returns -1 if user error.
 *********************************************************************************************/
-vseg_t * vmm_get_vseg( struct process_s * process,
-                       intptr_t           vaddr );
+error_t vmm_get_vseg( struct process_s  * process,
+                      intptr_t            vaddr,
+                      vseg_t           ** vseg );

vmm_handle_page_fault() comment (old lines 296-300, new lines 302-306):

  * The vseg containing the searched VPN should be registered in the reference VMM.
  * If the PTE in the reference page table is unmapped, this function allocates the missing
- * physical page from the target cluster defined by the vseg type, initilize it,
+ * physical page from the target cluster defined by the vseg type, initialize it,
  * and update the reference page table. It calls the RPC_PMEM_GET_PAGES to get and
  * initialize the missing physical page, if the target cluster is not the reference cluster.
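The comment above describes a two-path allocation: local allocation when the target cluster is the one running the handler (the reference cluster), and the RPC_PMEM_GET_PAGES service otherwise. A rough sketch of that decision is given below; alloc_local_page() and rpc_get_remote_page() are placeholder declarations, not the actual ALMOS-MKH allocator or RPC signatures, which this changeset does not show:

    // Sketch only: placeholder declarations standing in for the real primitives.
    extern page_t * alloc_local_page( void );
    extern error_t  rpc_get_remote_page( cxy_t cxy , ppn_t * ppn );

    static error_t allocate_missing_page( cxy_t target_cxy , ppn_t * ppn )
    {
        if( target_cxy == local_cxy )        // target cluster is the local one
        {
            page_t * page = alloc_local_page();            // placeholder
            if( page == NULL ) return ENOMEM;
            *ppn = ppm_page2ppn( page );
        }
        else                                 // delegate allocation to the target cluster
        {
            // stands in for the RPC_PMEM_GET_PAGES path described above
            error_t error = rpc_get_remote_page( target_cxy , ppn );   // placeholder
            if( error ) return ENOMEM;
        }
        return 0;
    }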
Deleted (old lines 349-397): deprecated prototypes

-int sys_madvise( void     * start,
-                 uint32_t   length,
-                 uint32_t   advice );
-
-int sys_sbrk( uint32_t current_heap_ptr,
-              uint32_t size );
-
-error_t vmm_sbrk( vmm_t    * vmm,
-                  uint32_t   current,
-                  uint32_t   size );
-
-error_t vmm_madvise_migrate( vmm_t    * vmm,
-                             uint32_t   start,
-                             uint32_t   len );
-
-error_t vmm_madvise_willneed( vmm_t    * vmm,
-                              uint32_t   start,
-                              uint32_t   len );
-
-error_t vmm_set_auto_migrate( vmm_t    * vmm,
-                              uint32_t   start,
-                              uint32_t   flags );
-
-// Hypothesis: the region is shared-anon, mapper list is rdlocked, page is locked
-error_t vmm_broadcast_inval( vseg_t  * region,
-                             page_t  * page,
-                             page_t ** new );
-
-// Hypothesis: the region is shared-anon, mapper list is rdlocked, page is locked
-error_t vmm_migrate_shared_page_seq( vseg_t  * region,
-                                     page_t  * page,
-                                     page_t ** new );

 #endif /* _VMM_H_ */
trunk/kernel/mm/vseg.h
r315 → r388

vseg_attach() comment (line 151):

  * @ vmm    : pointer on the VMM
  * @ vseg   : pointer on the vseg descriptor
- * @ returns 0 if success / returns ENOMEM if failure.
+ * @ returns 0 if success / returns ENOMEM if the registration in the GRDXT is impossible.
  *********************************************************************************************/
  error_t vseg_attach( struct vmm_s * vmm,
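Since vseg_attach() is now documented as returning ENOMEM when the GRDXT insertion fails, a caller that prefers graceful recovery over the panic used in vmm_get_vseg() above could look like the sketch below. It assumes a vseg_free() counterpart to vseg_alloc(), which this changeset does not show:

    // Sketch only: graceful ENOMEM handling; vseg_free() is assumed, not shown here.
    static error_t attach_vseg_copy( vmm_t * vmm , xptr_t ref_vseg_xp , vseg_t ** out )
    {
        vseg_t * vseg = vseg_alloc();
        if( vseg == NULL ) return ENOMEM;           // no memory for the descriptor

        vseg_init_from_ref( vseg , ref_vseg_xp );   // initialise from the reference copy

        error_t error = vseg_attach( vmm , vseg );  // ENOMEM if GRDXT registration fails
        if( error )
        {
            vseg_free( vseg );                      // assumed counterpart to vseg_alloc()
            return ENOMEM;
        }

        *out = vseg;
        return 0;
    }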