Changeset 640 for trunk/kernel/mm
- Timestamp: Oct 1, 2019, 1:19:00 PM
- Location: trunk/kernel/mm
- Files: 3 edited
trunk/kernel/mm/vmm.c
r635 r640 32 32 #include <printk.h> 33 33 #include <memcpy.h> 34 #include <remote_rwlock.h>35 34 #include <remote_queuelock.h> 36 35 #include <list.h> … … 313 312 314 313 // initialize the lock protecting the VSL 315 remote_ rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );314 remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 316 315 317 316 … … 425 424 426 425 // take the VSL lock 427 remote_ rwlock_wr_acquire( lock_xp );426 remote_queuelock_acquire( lock_xp ); 428 427 429 428 // scan the VSL to delete all non kernel vsegs … … 474 473 475 474 // release the VSL lock 476 remote_ rwlock_wr_release( lock_xp );475 remote_queuelock_release( lock_xp ); 477 476 478 477 // FIXME il faut gérer les process copies... … … 491 490 492 491 } // end vmm_user_reset() 492 493 ///////////////////////////////////////////////// 494 void vmm_global_delete_vseg( process_t * process, 495 intptr_t base ) 496 { 497 pid_t pid; 498 cxy_t owner_cxy; 499 lpid_t owner_lpid; 500 501 xlist_entry_t * process_root_ptr; 502 xptr_t process_root_xp; 503 xptr_t process_iter_xp; 504 505 xptr_t remote_process_xp; 506 cxy_t remote_process_cxy; 507 process_t * remote_process_ptr; 508 509 xptr_t vsl_root_xp; 510 xptr_t vsl_lock_xp; 511 xptr_t vsl_iter_xp; 512 513 #if DEBUG_VMM_GLOBAL_DELETE_VSEG 514 uint32_t cycle = (uint32_t)hal_get_cycles(); 515 thread_t * this = CURRENT_THREAD; 516 #endif 517 518 #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) 519 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 520 printk("\n[%s] thread[%x,%x] : process %x / base %x / cycle %d\n", 521 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle ); 522 #endif 523 524 // get owner process cluster and local index 525 pid = process->pid; 526 owner_cxy = CXY_FROM_PID( pid ); 527 owner_lpid = LPID_FROM_PID( pid ); 528 529 // get extended pointer on root of process copies xlist in owner cluster 530 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 531 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 532 533 // loop on process copies 534 XLIST_FOREACH( process_root_xp , process_iter_xp ) 535 { 536 // get cluster and local pointer on remote process 537 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 538 remote_process_ptr = GET_PTR( remote_process_xp ); 539 remote_process_cxy = GET_CXY( remote_process_xp ); 540 541 // build extended pointers on remote VSL root and lock 542 vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root ); 543 vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock ); 544 545 // get lock on remote VSL 546 remote_queuelock_acquire( vsl_lock_xp ); 547 548 // loop on vsegs in remote process VSL 549 XLIST_FOREACH( vsl_root_xp , vsl_iter_xp ) 550 { 551 // get pointers on current vseg 552 xptr_t vseg_xp = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist ); 553 vseg_t * vseg_ptr = GET_PTR( vseg_xp ); 554 555 // get current vseg base address 556 intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy, 557 &vseg_ptr->min ) ); 558 559 if( vseg_base == base ) // found searched vseg 560 { 561 if( remote_process_cxy == local_cxy ) 562 { 563 vmm_remove_vseg( process, 564 vseg_ptr ); 565 } 566 else 567 { 568 rpc_vmm_remove_vseg_client( remote_process_cxy, 569 remote_process_ptr, 570 vseg_ptr ); 571 } 572 573 #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) 574 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 575 printk("\n[%s] thread[%x,%x] deleted vseg %x for process %x in cluster %x\n", 576 __FUNCTION__, 
this->process->pid, this->trdid, base, process->pid, remote_process_cxy ); 577 #endif 578 579 } 580 } // end of loop on vsegs 581 582 #if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1) 583 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 584 hal_vmm_display( remote_process_xp , false ); 585 #endif 586 587 // release lock on remote VSL 588 remote_queuelock_release( vsl_lock_xp ); 589 590 } // end of loop on process copies 591 592 #if DEBUG_VMM_GLOBAL_DELETE_VSEG 593 cycle = (uint32_t)hal_get_cycles(); 594 if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle ) 595 printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n", 596 __FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle ); 597 #endif 598 599 } // end vmm_global_delete_vseg() 600 601 //////////////////////////////////////////////// 602 void vmm_global_resize_vseg( process_t * process, 603 intptr_t base, 604 intptr_t new_base, 605 intptr_t new_size ) 606 { 607 pid_t pid; 608 cxy_t owner_cxy; 609 lpid_t owner_lpid; 610 611 xlist_entry_t * process_root_ptr; 612 xptr_t process_root_xp; 613 xptr_t process_iter_xp; 614 615 xptr_t remote_process_xp; 616 cxy_t remote_process_cxy; 617 process_t * remote_process_ptr; 618 619 xptr_t vsl_root_xp; 620 xptr_t vsl_lock_xp; 621 xptr_t vsl_iter_xp; 622 623 #if DEBUG_VMM_GLOBAL_RESIZE_VSEG 624 uint32_t cycle = (uint32_t)hal_get_cycles(); 625 thread_t * this = CURRENT_THREAD; 626 #endif 627 628 #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) 629 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 630 printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n", 631 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle ); 632 #endif 633 634 // get owner process cluster and local index 635 pid = process->pid; 636 owner_cxy = CXY_FROM_PID( pid ); 637 owner_lpid = LPID_FROM_PID( pid ); 638 639 // get extended pointer on root of process copies xlist in owner cluster 640 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 641 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 642 643 // loop on process copies 644 XLIST_FOREACH( process_root_xp , process_iter_xp ) 645 { 646 // get cluster and local pointer on remote process 647 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 648 remote_process_ptr = GET_PTR( remote_process_xp ); 649 remote_process_cxy = GET_CXY( remote_process_xp ); 650 651 // build extended pointers on remote VSL root and lock 652 vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root ); 653 vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock ); 654 655 // get lock on remote VSL 656 remote_queuelock_acquire( vsl_lock_xp ); 657 658 // loop on vsegs in remote process VSL 659 XLIST_FOREACH( vsl_root_xp , vsl_iter_xp ) 660 { 661 // get pointers on current vseg 662 xptr_t vseg_xp = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist ); 663 vseg_t * vseg_ptr = GET_PTR( vseg_xp ); 664 665 // get current vseg base address 666 intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy, 667 &vseg_ptr->min ) ); 668 669 if( vseg_base == base ) // found searched vseg 670 { 671 if( remote_process_cxy == local_cxy ) 672 { 673 vmm_resize_vseg( remote_process_ptr, 674 vseg_ptr, 675 new_base, 676 new_size ); 677 } 678 else 679 { 680 rpc_vmm_resize_vseg_client( remote_process_cxy, 681 remote_process_ptr, 682 vseg_ptr, 683 new_base, 684 new_size ); 685 } 686 687 #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) 688 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 689 
printk("\n[%s] thread[%x,%x] resized vseg %x for process %x in cluster %x\n", 690 __FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy ); 691 #endif 692 693 } 694 } // end of loop on vsegs 695 696 #if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1) 697 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 698 hal_vmm_display( remote_process_xp , false ); 699 #endif 700 701 // release lock on remote VSL 702 remote_queuelock_release( vsl_lock_xp ); 703 } // end of loop on process copies 704 705 #if DEBUG_VMM_GLOBAL_RESIZE_VSEG 706 cycle = (uint32_t)hal_get_cycles(); 707 if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle ) 708 printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n", 709 __FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle ); 710 #endif 711 712 } // end vmm_global_resize_vseg() 493 713 494 714 //////////////////////////////////////////////// … … 498 718 ppn_t ppn ) 499 719 { 720 pid_t pid; 721 cxy_t owner_cxy; 722 lpid_t owner_lpid; 723 500 724 xlist_entry_t * process_root_ptr; 501 725 xptr_t process_root_xp; … … 507 731 xptr_t remote_gpt_xp; 508 732 509 pid_t pid; 510 cxy_t owner_cxy; 511 lpid_t owner_lpid; 512 513 #if DEBUG_VMM_UPDATE_PTE 733 #if DEBUG_VMM_GLOBAL_UPDATE_PTE 514 734 uint32_t cycle = (uint32_t)hal_get_cycles(); 515 735 thread_t * this = CURRENT_THREAD; 516 if( DEBUG_VMM_UPDATE_PTE < cycle ) 736 #endif 737 738 739 #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) 740 if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) 517 741 printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / ycle %d\n", 518 742 __FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle ); 519 743 #endif 520 744 521 // get extended pointer on root of process copies xlist in owner cluster745 // get owner process cluster and local index 522 746 pid = process->pid; 523 747 owner_cxy = CXY_FROM_PID( pid ); 524 748 owner_lpid = LPID_FROM_PID( pid ); 749 750 // get extended pointer on root of process copies xlist in owner cluster 525 751 process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid]; 526 752 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 527 753 528 // check local cluster is owner cluster 529 assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n"); 530 531 // loop on destination process copies 754 // loop on process copies 532 755 XLIST_FOREACH( process_root_xp , process_iter_xp ) 533 756 { … … 537 760 remote_process_cxy = GET_CXY( remote_process_xp ); 538 761 539 #if (DEBUG_VMM_ UPDATE_PTE & 1)540 if( DEBUG_VMM_ UPDATE_PTE < cycle )762 #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) 763 if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) 541 764 printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n", 542 765 __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); … … 550 773 } 551 774 552 #if DEBUG_VMM_ UPDATE_PTE775 #if DEBUG_VMM_GLOBAL_UPDATE_PTE 553 776 cycle = (uint32_t)hal_get_cycles(); 554 if( DEBUG_VMM_ UPDATE_PTE < cycle )777 if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle ) 555 778 printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n", 556 779 __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); 557 780 #endif 558 781 559 #if (DEBUG_VMM_ UPDATE_PTE & 1)782 #if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1) 560 783 hal_vmm_display( process , true ); 561 784 #endif … … 772 995 parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock ); 773 996 774 // take the lock protecting the parent VSL in read mode775 remote_ 
rwlock_rd_acquire( parent_lock_xp );997 // take the lock protecting the parent VSL 998 remote_queuelock_acquire( parent_lock_xp ); 776 999 777 1000 // loop on parent VSL xlist … … 809 1032 vseg_init_from_ref( child_vseg , parent_vseg_xp ); 810 1033 811 // build extended pointer on VSL lock812 xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );1034 // build extended pointer on child VSL lock 1035 xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock ); 813 1036 814 // take the VSL lock in write mode815 remote_ rwlock_wr_acquire(lock_xp );1037 // take the child VSL lock 1038 remote_queuelock_acquire( child_lock_xp ); 816 1039 817 1040 // register child vseg in child VSL 818 1041 vmm_attach_vseg_to_vsl( child_vmm , child_vseg ); 819 1042 820 // release the VSL lock821 remote_ rwlock_wr_release(lock_xp );1043 // release the child VSL lock 1044 remote_queuelock_release( child_lock_xp ); 822 1045 823 1046 #if DEBUG_VMM_FORK_COPY … … 866 1089 867 1090 // release the parent VSL lock in read mode 868 remote_ rwlock_rd_release( parent_lock_xp );1091 remote_queuelock_release( parent_lock_xp ); 869 1092 870 1093 // initialize the child VMM STACK allocator … … 939 1162 940 1163 // take the VSL lock 941 remote_ rwlock_wr_acquire( vsl_lock_xp );1164 remote_queuelock_acquire( vsl_lock_xp ); 942 1165 943 1166 // scan the VSL to delete all registered vsegs … … 968 1191 969 1192 // release the VSL lock 970 remote_ rwlock_wr_release( vsl_lock_xp );1193 remote_queuelock_release( vsl_lock_xp ); 971 1194 972 1195 // remove all registered MMAP vsegs … … 1042 1265 1043 1266 } // end vmm_check_conflict() 1044 1045 1046 1267 1047 1268 //////////////////////////////////////////////// … … 1060 1281 error_t error; 1061 1282 1283 #if DEBUG_VMM_CREATE_VSEG 1284 thread_t * this = CURRENT_THREAD; 1285 uint32_t cycle; 1286 #endif 1287 1062 1288 #if (DEBUG_VMM_CREATE_VSEG & 1) 1063 thread_t * this = CURRENT_THREAD; 1064 uint32_t cycle = (uint32_t)hal_get_cycles(); 1289 cycle = (uint32_t)hal_get_cycles(); 1065 1290 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1066 1291 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n", … … 1180 1405 1181 1406 // take the VSL lock in write mode 1182 remote_ rwlock_wr_acquire( lock_xp );1407 remote_queuelock_acquire( lock_xp ); 1183 1408 1184 1409 // attach vseg to VSL … … 1186 1411 1187 1412 // release the VSL lock 1188 remote_ rwlock_wr_release( lock_xp );1413 remote_queuelock_release( lock_xp ); 1189 1414 1190 1415 #if DEBUG_VMM_CREATE_VSEG 1191 1416 cycle = (uint32_t)hal_get_cycles(); 1192 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1417 // if( DEBUG_VMM_CREATE_VSEG < cycle ) 1418 if( type == VSEG_TYPE_REMOTE ) 1193 1419 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n", 1194 1420 __FUNCTION__, this->process->pid, this->trdid, … … 1200 1426 } // vmm_create_vseg() 1201 1427 1428 //////////////////////////////////////////////////////////////////////////////////////////// 1429 // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions. 1430 // Depending on the vseg <type>, it decrements the physical page refcount, and 1431 // conditionnally release to the relevant kmem the physical page identified by <ppn>. 1432 //////////////////////////////////////////////////////////////////////////////////////////// 1433 // @ process : local pointer on process. 1434 // @ vseg : local pointer on vseg. 1435 // @ ppn : released pysical page index. 
1436 //////////////////////////////////////////////////////////////////////////////////////////// 1437 static void vmm_ppn_release( process_t * process, 1438 vseg_t * vseg, 1439 ppn_t ppn ) 1440 { 1441 bool_t do_release; 1442 1443 // get vseg type 1444 vseg_type_t type = vseg->type; 1445 1446 // compute is_ref 1447 bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy); 1448 1449 // get pointers on physical page descriptor 1450 xptr_t page_xp = ppm_ppn2page( ppn ); 1451 cxy_t page_cxy = GET_CXY( page_xp ); 1452 page_t * page_ptr = GET_PTR( page_xp ); 1453 1454 // decrement page refcount 1455 xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount ); 1456 hal_remote_atomic_add( count_xp , -1 ); 1457 1458 // compute the do_release condition depending on vseg type 1459 if( (type == VSEG_TYPE_FILE) || 1460 (type == VSEG_TYPE_KCODE) || 1461 (type == VSEG_TYPE_KDATA) || 1462 (type == VSEG_TYPE_KDEV) ) 1463 { 1464 // no physical page release for FILE and KERNEL 1465 do_release = false; 1466 } 1467 else if( (type == VSEG_TYPE_CODE) || 1468 (type == VSEG_TYPE_STACK) ) 1469 { 1470 // always release physical page for private vsegs 1471 do_release = true; 1472 } 1473 else if( (type == VSEG_TYPE_ANON) || 1474 (type == VSEG_TYPE_REMOTE) ) 1475 { 1476 // release physical page if reference cluster 1477 do_release = is_ref; 1478 } 1479 else if( is_ref ) // vseg_type == DATA in reference cluster 1480 { 1481 // get extended pointers on forks and lock field in page descriptor 1482 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1483 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1484 1485 // take lock protecting "forks" counter 1486 remote_busylock_acquire( lock_xp ); 1487 1488 // get number of pending forks from page descriptor 1489 uint32_t forks = hal_remote_l32( forks_xp ); 1490 1491 // decrement pending forks counter if required 1492 if( forks ) hal_remote_atomic_add( forks_xp , -1 ); 1493 1494 // release lock protecting "forks" counter 1495 remote_busylock_release( lock_xp ); 1496 1497 // release physical page if forks == 0 1498 do_release = (forks == 0); 1499 } 1500 else // vseg_type == DATA not in reference cluster 1501 { 1502 // no physical page release if not in reference cluster 1503 do_release = false; 1504 } 1505 1506 // release physical page to relevant kmem when required 1507 if( do_release ) 1508 { 1509 ppm_remote_free_pages( page_cxy , page_ptr ); 1510 1511 #if DEBUG_VMM_PPN_RELEASE 1512 thread_t * this = CURRENT_THREAD; 1513 if( DEBUG_VMM_PPN_RELEASE < cycle ) 1514 printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n", 1515 __FUNCTION__, this->process->pid, this->trdid, ppn ); 1516 #endif 1517 1518 } 1519 } // end vmm_ppn_release() 1202 1520 1203 1521 ////////////////////////////////////////// … … 1205 1523 vseg_t * vseg ) 1206 1524 { 1207 vmm_t * vmm; // local pointer on process VMM1208 xptr_t gpt_xp; // extended pointer on GPT1209 bool_t is_ref; // local process is reference process1210 1525 uint32_t vseg_type; // vseg type 1211 1526 vpn_t vpn; // VPN of current PTE … … 1214 1529 ppn_t ppn; // current PTE ppn value 1215 1530 uint32_t attr; // current PTE attributes 1216 xptr_t page_xp; // extended pointer on page descriptor1217 cxy_t page_cxy; // page descriptor cluster1218 page_t * page_ptr; // page descriptor pointer1219 xptr_t count_xp; // extended pointer on page refcount1220 1531 1221 1532 // check arguments … … 1223 1534 assert( (vseg != NULL), "vseg argument is NULL" ); 1224 1535 1225 // compute is_ref1226 is_ref = (GET_CXY( process->ref_xp ) == local_cxy);1227 1228 
1536 // get pointers on local process VMM 1229 vmm = &process->vmm;1537 vmm_t * vmm = &process->vmm; 1230 1538 1231 1539 // build extended pointer on GPT 1232 gpt_xp = XPTR( local_cxy , &vmm->gpt );1540 xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt ); 1233 1541 1234 1542 // get relevant vseg infos … … 1240 1548 uint32_t cycle = (uint32_t)hal_get_cycles(); 1241 1549 thread_t * this = CURRENT_THREAD; 1550 #endif 1551 1552 #if (DEBUG_VMM_REMOVE_VSEG & 1 ) 1242 1553 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1243 1554 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", … … 1246 1557 #endif 1247 1558 1248 // loop on PTEs in GPT 1559 // loop on PTEs in GPT to unmap all mapped PTE 1249 1560 for( vpn = vpn_min ; vpn < vpn_max ; vpn++ ) 1250 1561 { … … 1257 1568 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1258 1569 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1259 printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) ); 1570 printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", 1571 __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); 1260 1572 #endif 1261 1573 // unmap GPT entry in local GPT 1262 1574 hal_gpt_reset_pte( gpt_xp , vpn ); 1263 1575 1264 // get pointers on physical page descriptor 1265 page_xp = ppm_ppn2page( ppn ); 1266 page_cxy = GET_CXY( page_xp ); 1267 page_ptr = GET_PTR( page_xp ); 1268 1269 // decrement page refcount 1270 count_xp = XPTR( page_cxy , &page_ptr->refcount ); 1271 hal_remote_atomic_add( count_xp , -1 ); 1272 1273 // compute the ppn_release condition depending on vseg type 1274 bool_t ppn_release; 1275 if( (vseg_type == VSEG_TYPE_FILE) || 1276 (vseg_type == VSEG_TYPE_KCODE) || 1277 (vseg_type == VSEG_TYPE_KDATA) || 1278 (vseg_type == VSEG_TYPE_KDEV) ) 1279 { 1280 // no physical page release for FILE and KERNEL 1281 ppn_release = false; 1282 } 1283 else if( (vseg_type == VSEG_TYPE_CODE) || 1284 (vseg_type == VSEG_TYPE_STACK) ) 1285 { 1286 // always release physical page for private vsegs 1287 ppn_release = true; 1288 } 1289 else if( (vseg_type == VSEG_TYPE_ANON) || 1290 (vseg_type == VSEG_TYPE_REMOTE) ) 1291 { 1292 // release physical page if reference cluster 1293 ppn_release = is_ref; 1294 } 1295 else if( is_ref ) // vseg_type == DATA in reference cluster 1296 { 1297 // get extended pointers on forks and lock field in page descriptor 1298 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks ); 1299 xptr_t lock_xp = XPTR( page_cxy , &page_ptr->lock ); 1300 1301 // take lock protecting "forks" counter 1302 remote_busylock_acquire( lock_xp ); 1303 1304 // get number of pending forks from page descriptor 1305 uint32_t forks = hal_remote_l32( forks_xp ); 1306 1307 // decrement pending forks counter if required 1308 if( forks ) hal_remote_atomic_add( forks_xp , -1 ); 1309 1310 // release lock protecting "forks" counter 1311 remote_busylock_release( lock_xp ); 1312 1313 // release physical page if forks == 0 1314 ppn_release = (forks == 0); 1315 } 1316 else // vseg_type == DATA not in reference cluster 1317 { 1318 // no physical page release if not in reference cluster 1319 ppn_release = false; 1320 } 1321 1322 // release physical page to relevant kmem when required 1323 if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr ); 1324 1325 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1326 if( DEBUG_VMM_REMOVE_VSEG < cycle ) 1327 { 1328 if( ppn_release ) printk(" / released to kmem\n" ); 1329 else printk("\n"); 1330 } 1331 #endif 1576 // release physical page when required 1577 vmm_ppn_release( process , vseg , ppn ); 1332 1578 
} 1333 1579 } … … 1368 1614 } // end vmm_remove_vseg() 1369 1615 1370 1371 /////////////////////////////////// 1372 void vmm_delete_vseg( pid_t pid, 1373 intptr_t vaddr ) 1616 ///////////////////////////////////////////// 1617 void vmm_resize_vseg( process_t * process, 1618 vseg_t * vseg, 1619 intptr_t new_base, 1620 intptr_t new_size ) 1374 1621 { 1375 process_t * process; // local pointer on local process 1376 vseg_t * vseg; // local pointer on local vseg containing vaddr 1377 1378 // get local pointer on local process descriptor 1379 process = cluster_get_local_process_from_pid( pid ); 1380 1381 if( process == NULL ) 1382 { 1383 printk("\n[WARNING] in %s : cannot get local process descriptor\n", 1384 __FUNCTION__ ); 1385 return; 1386 } 1387 1388 // get local pointer on local vseg containing vaddr 1389 vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr ); 1390 1391 if( vseg == NULL ) 1392 { 1393 printk("\n[WARNING] in %s : cannot get vseg descriptor\n", 1394 __FUNCTION__ ); 1395 return; 1396 } 1397 1398 // call relevant function 1399 vmm_remove_vseg( process , vseg ); 1400 1401 } // end vmm_delete_vseg 1402 1403 1404 ///////////////////////////////////////////// 1405 vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, 1406 intptr_t vaddr ) 1407 { 1408 xptr_t vseg_xp; 1409 vseg_t * vseg; 1410 xptr_t iter_xp; 1411 1412 // get extended pointers on VSL lock and root 1413 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); 1414 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 1415 1416 // get lock protecting the VSL 1417 remote_rwlock_rd_acquire( lock_xp ); 1418 1419 // scan the list of vsegs in VSL 1420 XLIST_FOREACH( root_xp , iter_xp ) 1421 { 1422 // get pointers on vseg 1423 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 1424 vseg = GET_PTR( vseg_xp ); 1425 1426 // return success when match 1427 if( (vaddr >= vseg->min) && (vaddr < vseg->max) ) 1428 { 1429 // return success 1430 remote_rwlock_rd_release( lock_xp ); 1431 return vseg; 1432 } 1433 } 1434 1435 // return failure 1436 remote_rwlock_rd_release( lock_xp ); 1437 return NULL; 1438 1439 } // end vmm_vseg_from_vaddr() 1440 1441 ///////////////////////////////////////////// 1442 error_t vmm_resize_vseg( process_t * process, 1443 intptr_t base, 1444 intptr_t size ) 1445 { 1446 error_t error; 1447 vseg_t * new; 1448 vpn_t vpn_min; 1449 vpn_t vpn_max; 1622 vpn_t vpn; 1623 ppn_t ppn; 1624 uint32_t attr; 1625 1626 // check arguments 1627 assert( (process != NULL), "process argument is NULL" ); 1628 assert( (vseg != NULL), "vseg argument is NULL" ); 1450 1629 1451 1630 #if DEBUG_VMM_RESIZE_VSEG 1452 1631 uint32_t cycle = (uint32_t)hal_get_cycles(); 1453 1632 thread_t * this = CURRENT_THREAD; 1633 #endif 1634 1635 #if (DEBUG_VMM_RESIZE_VSEG & 1) 1454 1636 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1455 printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n", 1456 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); 1457 #endif 1458 1459 // get pointer on process VMM 1460 vmm_t * vmm = &process->vmm; 1461 1462 intptr_t addr_min = base; 1463 intptr_t addr_max = base + size; 1464 1465 // get pointer on vseg 1466 vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base ); 1467 1468 if( vseg == NULL) 1469 { 1470 printk("\n[ERROR] in %s : vseg(%x,%d) not found\n", 1471 __FUNCTION__, base , size ); 1472 return -1; 1473 } 1474 1475 // resize depends on unmapped region base and size 1476 if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg 1477 { 1478 printk("\n[ERROR] in %s : 
unmapped region[%x->%x[ not included in vseg[%x->%x[\n", 1479 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1480 1481 error = -1; 1482 } 1483 else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted 1484 { 1637 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n", 1638 __FUNCTION__, this->process->pid, this->trdid, 1639 process->pid, vseg_type_str(vseg->type), old_base, cycle ); 1640 #endif 1641 1642 // get existing vseg vpn_min and vpn_max 1643 vpn_t old_vpn_min = vseg->vpn_base; 1644 vpn_t old_vpn_max = old_vpn_min + vseg->vpn_size - 1; 1645 1646 // compute new vseg vpn_min & vpn_max 1647 intptr_t min = new_base; 1648 intptr_t max = new_base + new_size; 1649 vpn_t new_vpn_min = min >> CONFIG_PPM_PAGE_SHIFT; 1650 vpn_t new_vpn_max = (max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1651 1652 // build extended pointer on GPT 1653 xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt ); 1654 1655 // loop on PTEs in GPT to unmap PTE if (oldd_vpn_min <= vpn < new_vpn_min) 1656 for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ ) 1657 { 1658 // get ppn and attr 1659 hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); 1660 1661 if( attr & GPT_MAPPED ) // PTE is mapped 1662 { 1485 1663 1486 1664 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1487 1665 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1488 printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n", 1489 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1490 #endif 1491 vmm_delete_vseg( process->pid , vseg->min ); 1492 1493 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1666 printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", 1667 __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); 1668 #endif 1669 // unmap GPT entry 1670 hal_gpt_reset_pte( gpt_xp , vpn ); 1671 1672 // release physical page when required 1673 vmm_ppn_release( process , vseg , ppn ); 1674 } 1675 } 1676 1677 // loop on PTEs in GPT to unmap PTE if (new vpn_max <= vpn < old_vpn_max) 1678 for( vpn = new_vpn_max ; vpn < old_vpn_max ; vpn++ ) 1679 { 1680 // get ppn and attr 1681 hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn ); 1682 1683 if( attr & GPT_MAPPED ) // PTE is mapped 1684 { 1685 1686 #if( DEBUG_VMM_REMOVE_VSEG & 1 ) 1494 1687 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1495 printk("\n[%s] thread[%x,%x] deleted vseg\n", 1496 __FUNCTION__, this->process->pid, this->trdid ); 1497 #endif 1498 error = 0; 1499 } 1500 else if( vseg->min == addr_min ) // vseg must be resized 1501 { 1502 1503 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1688 printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s", 1689 __FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) ); 1690 #endif 1691 // unmap GPT entry in local GPT 1692 hal_gpt_reset_pte( gpt_xp , vpn ); 1693 1694 // release physical page when required 1695 vmm_ppn_release( process , vseg , ppn ); 1696 } 1697 } 1698 1699 // resize vseg in VSL 1700 vseg->min = min; 1701 vseg->max = max; 1702 vseg->vpn_base = new_vpn_min; 1703 vseg->vpn_size = new_vpn_max - new_vpn_min + 1; 1704 1705 #if DEBUG_VMM_RESIZE_VSEG 1706 cycle = (uint32_t)hal_get_cycles(); 1504 1707 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1505 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1506 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1507 #endif 1508 // update vseg min address 1509 vseg->min = addr_max; 1510 1511 // update vpn_base and vpn_size 1512 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 1513 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1514 vseg->vpn_base = 
vpn_min; 1515 vseg->vpn_size = vpn_max - vpn_min + 1; 1516 1517 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1518 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1519 printk("\n[%s] thread[%x,%x] changed vseg_min\n", 1520 __FUNCTION__, this->process->pid, this->trdid ); 1521 #endif 1522 error = 0; 1523 } 1524 else if( vseg->max == addr_max ) // vseg must be resized 1525 { 1526 1527 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1528 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1529 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1530 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1531 #endif 1532 // update vseg max address 1533 vseg->max = addr_min; 1534 1535 // update vpn_base and vpn_size 1536 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 1537 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1538 vseg->vpn_base = vpn_min; 1539 vseg->vpn_size = vpn_max - vpn_min + 1; 1540 1541 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1542 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1543 printk("\n[%s] thread[%x,%x] changed vseg_max\n", 1544 __FUNCTION__, this->process->pid, this->trdid ); 1545 #endif 1546 error = 0; 1547 1548 } 1549 else // vseg cut in three regions 1550 { 1551 1552 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1553 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1554 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n", 1555 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max ); 1556 #endif 1557 // resize existing vseg 1558 vseg->max = addr_min; 1559 1560 // update vpn_base and vpn_size 1561 vpn_min = vseg->min >> CONFIG_PPM_PAGE_SHIFT; 1562 vpn_max = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT; 1563 vseg->vpn_base = vpn_min; 1564 vseg->vpn_size = vpn_max - vpn_min + 1; 1565 1566 // create new vseg 1567 new = vmm_create_vseg( process, 1568 vseg->type, 1569 addr_min, 1570 (vseg->max - addr_max), 1571 vseg->file_offset, 1572 vseg->file_size, 1573 vseg->mapper_xp, 1574 vseg->cxy ); 1575 1576 #if( DEBUG_VMM_RESIZE_VSEG & 1 ) 1577 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1578 printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n", 1579 __FUNCTION__, this->process->pid, this->trdid ); 1580 #endif 1581 1582 if( new == NULL ) error = -1; 1583 else error = 0; 1584 } 1585 1586 #if DEBUG_VMM_RESIZE_VSEG 1587 if( DEBUG_VMM_RESIZE_VSEG < cycle ) 1588 printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n", 1589 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle ); 1590 #endif 1591 1592 return error; 1593 1594 } // vmm_resize_vseg() 1708 printk("[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n", 1709 __FUNCTION__, this->process->pid, this->trdid, 1710 process->pid, vseg_type_str(vseg->type), vseg->min, cycle ); 1711 #endif 1712 1713 } // end vmm_resize_vseg 1714 1715 ///////////////////////////////////////////////////////////////////////////////////////////// 1716 // This static function is called twice by the vmm_get_vseg() function. 1717 // It scan the - possibly remote - VSL defined by the <vmm_xp> argument to find the vseg 1718 // containing a given virtual address <vaddr>. It uses remote accesses to access the remote 1719 // VSL if required. The VSL lock protecting the VSL must be taken by the caller. 1720 ///////////////////////////////////////////////////////////////////////////////////////////// 1721 // @ vmm_xp : extended pointer on the process VMM. 1722 // @ vaddr : virtual address. 1723 // @ return local pointer on remote vseg if success / return NULL if not found. 
1724 ///////////////////////////////////////////////////////////////////////////////////////////// 1725 static vseg_t * vmm_vseg_from_vaddr( xptr_t vmm_xp, 1726 intptr_t vaddr ) 1727 { 1728 xptr_t iter_xp; 1729 xptr_t vseg_xp; 1730 vseg_t * vseg; 1731 intptr_t min; 1732 intptr_t max; 1733 1734 // get cluster and local pointer on target VMM 1735 vmm_t * vmm_ptr = GET_PTR( vmm_xp ); 1736 cxy_t vmm_cxy = GET_CXY( vmm_xp ); 1737 1738 // build extended pointer on VSL root 1739 xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root ); 1740 1741 // scan the list of vsegs in VSL 1742 XLIST_FOREACH( root_xp , iter_xp ) 1743 { 1744 vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist ); 1745 vseg = GET_PTR( vseg_xp ); 1746 1747 min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) ); 1748 max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) ); 1749 1750 // return success when match 1751 if( (vaddr >= min) && (vaddr < max) ) return vseg; 1752 } 1753 1754 // return failure 1755 return NULL; 1756 1757 } // end vmm_vseg_from_vaddr() 1595 1758 1596 1759 /////////////////////////////////////////// … … 1599 1762 vseg_t ** found_vseg ) 1600 1763 { 1601 xptr_t vseg_xp; 1602 vseg_t * vseg; 1603 vmm_t * vmm; 1604 error_t error; 1605 1606 // get pointer on local VMM 1607 vmm = &process->vmm; 1764 xptr_t loc_lock_xp; // extended pointer on local VSL lock 1765 xptr_t ref_lock_xp; // extended pointer on reference VSL lock 1766 vseg_t * loc_vseg; // local pointer on local vseg 1767 vseg_t * ref_vseg; // local pointer on reference vseg 1768 1769 // build extended pointer on local VSL lock 1770 loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock ); 1771 1772 // get local VSL lock 1773 remote_queuelock_acquire( loc_lock_xp ); 1608 1774 1609 1775 // try to get vseg from local VMM 1610 vseg = vmm_vseg_from_vaddr( vmm, vaddr );1611 1612 if ( vseg == NULL ) // vseg not found in local cluster => try to get it from ref1613 1776 loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr ); 1777 1778 if (loc_vseg == NULL) // vseg not found => access reference VSL 1779 { 1614 1780 // get extended pointer on reference process 1615 1781 xptr_t ref_xp = process->ref_xp; 1616 1782 1617 // get cluster and local pointer on reference process 1783 // get cluster and local pointer on reference process 1618 1784 cxy_t ref_cxy = GET_CXY( ref_xp ); 1619 1785 process_t * ref_ptr = GET_PTR( ref_xp ); 1620 1786 1621 if( local_cxy == ref_cxy ) return -1; // local cluster is the reference 1622 1623 // get extended pointer on reference vseg 1624 rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error ); 1625 1626 if( error ) return -1; // vseg not found => illegal user vaddr 1627 1628 // allocate a vseg in local cluster 1629 vseg = vseg_alloc(); 1630 1631 if( vseg == NULL ) return -1; // cannot allocate a local vseg 1632 1633 // initialise local vseg from reference 1634 vseg_init_from_ref( vseg , vseg_xp ); 1635 1636 // build extended pointer on VSL lock 1637 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock ); 1638 1639 // take the VSL lock in write mode 1640 remote_rwlock_wr_acquire( lock_xp ); 1641 1642 // register local vseg in local VSL 1643 vmm_attach_vseg_to_vsl( vmm , vseg ); 1644 1645 // release the VSL lock 1646 remote_rwlock_wr_release( lock_xp ); 1647 } 1648 1649 // success 1650 *found_vseg = vseg; 1651 return 0; 1652 1787 // build extended pointer on reference VSL lock 1788 ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock ); 1789 1790 // get reference VSL lock 1791 remote_queuelock_acquire( ref_lock_xp ); 
1792 1793 // try to get vseg from reference VMM 1794 ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr ); 1795 1796 if( ref_vseg == NULL ) // vseg not found => return error 1797 { 1798 printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n", 1799 __FUNCTION__, vaddr, process->pid ); 1800 1801 // release reference VSL lock 1802 remote_queuelock_release( ref_lock_xp ); 1803 1804 return -1; 1805 } 1806 else // vseg found => try to update local VSL 1807 { 1808 // allocate a local vseg descriptor 1809 loc_vseg = vseg_alloc(); 1810 1811 if( loc_vseg == NULL ) // no memory => return error 1812 { 1813 printk("\n[ERROR] in %s : vaddr %x in process %x / no memory for local vseg\n", 1814 __FUNCTION__, vaddr, process->pid ); 1815 1816 // release reference VSL & local VSL locks 1817 remote_queuelock_release( ref_lock_xp ); 1818 remote_queuelock_release( loc_lock_xp ); 1819 1820 return -1; 1821 } 1822 else // update local VSL and return success 1823 { 1824 // initialize local vseg 1825 vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) ); 1826 1827 // register local vseg in local VSL 1828 vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg ); 1829 1830 // release reference VSL & local VSL locks 1831 remote_queuelock_release( ref_lock_xp ); 1832 remote_queuelock_release( loc_lock_xp ); 1833 1834 *found_vseg = loc_vseg; 1835 return 0; 1836 } 1837 } 1838 } 1839 else // vseg found in local VSL => return success 1840 { 1841 // release local VSL lock 1842 remote_queuelock_release( loc_lock_xp ); 1843 1844 *found_vseg = loc_vseg; 1845 return 0; 1846 } 1653 1847 } // end vmm_get_vseg() 1654 1848 … … 1658 1852 // pointer on the allocated page descriptor. 1659 1853 // The vseg cannot have the FILE type. 1854 ////////////////////////////////////////////////////////////////////////////////////// 1855 // @ vseg : local pointer on vseg. 1856 // @ vpn : unmapped vpn. 
1857 // @ return an extended pointer on the allocated page 1660 1858 ////////////////////////////////////////////////////////////////////////////////////// 1661 1859 static xptr_t vmm_page_allocate( vseg_t * vseg, … … 2194 2392 #if DEBUG_VMM_HANDLE_COW 2195 2393 uint32_t cycle = (uint32_t)hal_get_cycles(); 2196 if( DEBUG_VMM_HANDLE_COW < cycle)2394 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2197 2395 printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n", 2198 2396 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); … … 2200 2398 2201 2399 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3 ) 2202 hal_vmm_display( process, true );2400 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2203 2401 #endif 2204 2402 … … 2216 2414 2217 2415 #if DEBUG_VMM_HANDLE_COW 2218 if( DEBUG_VMM_HANDLE_COW < cycle)2416 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2219 2417 printk("\n[%s] thread[%x,%x] get vseg %s\n", 2220 2418 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) ); … … 2256 2454 2257 2455 #if DEBUG_VMM_HANDLE_COW 2258 if( DEBUG_VMM_HANDLE_COW < cycle)2456 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2259 2457 printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n", 2260 2458 __FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr ); … … 2285 2483 2286 2484 #if DEBUG_VMM_HANDLE_COW 2287 if( DEBUG_VMM_HANDLE_COW < cycle)2485 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2288 2486 printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n", 2289 2487 __FUNCTION__, this->process->pid, this->trdid, forks, vpn ); … … 2315 2513 2316 2514 #if DEBUG_VMM_HANDLE_COW 2317 if( DEBUG_VMM_HANDLE_COW < cycle)2515 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2318 2516 printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n", 2319 2517 __FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn ); … … 2326 2524 2327 2525 #if DEBUG_VMM_HANDLE_COW 2328 if( DEBUG_VMM_HANDLE_COW < cycle)2526 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2329 2527 printk("\n[%s] thread[%x,%x] copied old page to new page\n", 2330 2528 __FUNCTION__, this->process->pid, this->trdid ); … … 2338 2536 2339 2537 #if(DEBUG_VMM_HANDLE_COW & 1) 2340 if( DEBUG_VMM_HANDLE_COW < cycle)2538 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2341 2539 printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n", 2342 2540 __FUNCTION__, this->process->pid, this->trdid, old_ppn ); … … 2349 2547 2350 2548 #if(DEBUG_VMM_HANDLE_COW & 1) 2351 if( DEBUG_VMM_HANDLE_COW < cycle)2549 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2352 2550 printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n", 2353 2551 __FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn ); … … 2367 2565 else 2368 2566 { 2369 if( ref_cxy == local_cxy ) // reference cluster is local 2370 { 2371 vmm_global_update_pte( process, 2372 vpn, 2373 new_attr, 2374 new_ppn ); 2375 } 2376 else // reference cluster is remote 2377 { 2378 rpc_vmm_global_update_pte_client( ref_cxy, 2379 ref_ptr, 2380 vpn, 2381 new_attr, 2382 new_ppn ); 2383 } 2567 // set new PTE in all GPT copies 2568 vmm_global_update_pte( process, 2569 vpn, 2570 new_attr, 2571 new_ppn ); 2384 2572 } 2385 2573 2386 2574 #if DEBUG_VMM_HANDLE_COW 2387 2575 cycle = (uint32_t)hal_get_cycles(); 2388 if( DEBUG_VMM_HANDLE_COW < cycle)2576 if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) ) 2389 2577 printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n", 2390 2578 __FUNCTION__, 
this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle ); … … 2392 2580 2393 2581 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3) 2394 hal_vmm_display( process, true );2582 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2395 2583 #endif 2396 2584 -
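The two new global functions in vmm.c above, vmm_global_delete_vseg() and vmm_global_resize_vseg(), share one skeleton. The following condensed sketch is not the kernel source itself — the name global_apply_on_copies() is hypothetical — but isolates the pattern visible in the diff: scan the process copies xlist rooted in the owner cluster, take each remote VSL queuelock, then act locally or through the matching RPC.

static void global_apply_on_copies( process_t * process )
{
    pid_t    pid        = process->pid;
    cxy_t    owner_cxy  = CXY_FROM_PID( pid );
    lpid_t   owner_lpid = LPID_FROM_PID( pid );
    xptr_t   iter_xp;

    // root of the process copies xlist lives in the owner cluster
    xptr_t root_xp = XPTR( owner_cxy , &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid] );

    XLIST_FOREACH( root_xp , iter_xp )                      // loop on process copies
    {
        xptr_t      proc_xp  = XLIST_ELEMENT( iter_xp , process_t , copies_list );
        cxy_t       proc_cxy = GET_CXY( proc_xp );
        process_t * proc_ptr = GET_PTR( proc_xp );

        // the VSL is now protected by a remote_queuelock (was a remote_rwlock)
        xptr_t lock_xp = XPTR( proc_cxy , &proc_ptr->vmm.vsl_lock );
        remote_queuelock_acquire( lock_xp );

        // scan the remote VSL with XLIST_FOREACH / hal_remote_lpt() to find the
        // target vseg, then call vmm_remove_vseg() or vmm_resize_vseg() if
        // (proc_cxy == local_cxy), or the matching rpc_*_client() otherwise

        remote_queuelock_release( lock_xp );
    }
}

The same loop also explains why the RPC is only used for remote copies: the owner-cluster copy is handled by a direct local call, avoiding a useless round-trip through the RPC FIFO.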
trunk/kernel/mm/vmm.h
r635 r640 112 112 typedef struct vmm_s 113 113 { 114 remote_ rwlock_t vsl_lock; /*! lock protecting the local VSL*/115 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference)*/116 uint32_t vsegs_nr; /*! total number of local vsegs*/117 118 gpt_t gpt; /*! Generic Page Table (complete in reference)*/119 120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator*/121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator*/122 123 uint32_t false_pgfault_nr; /*! false page fault counter (for all threads)*/124 uint32_t local_pgfault_nr; /*! false page fault counter (for all threads)*/125 uint32_t global_pgfault_nr; /*! false page fault counter (for all threads)*/126 uint32_t false_pgfault_cost; /*! cumulated cost (for all threads)*/127 uint32_t local_pgfault_cost; /*! cumulated cost (for all threads)*/128 uint32_t global_pgfault_cost; /*! cumulated cost (for all threads)*/129 130 vpn_t args_vpn_base; /*! args vseg first page*/131 vpn_t envs_vpn_base; /*! envs vseg first page*/132 vpn_t code_vpn_base; /*! code vseg first page*/133 vpn_t data_vpn_base; /*! data vseg first page*/134 vpn_t heap_vpn_base; /*! heap zone first page*/135 136 intptr_t entry_point; /*! main thread entry point*/114 remote_queuelock_t vsl_lock; /*! lock protecting the local VSL */ 115 xlist_entry_t vsegs_root; /*! Virtual Segment List root */ 116 uint32_t vsegs_nr; /*! total number of local vsegs */ 117 118 gpt_t gpt; /*! Generic Page Table descriptor */ 119 120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 122 123 uint32_t false_pgfault_nr; /*! false page fault counter (for all threads) */ 124 uint32_t local_pgfault_nr; /*! false page fault counter (for all threads) */ 125 uint32_t global_pgfault_nr; /*! false page fault counter (for all threads) */ 126 uint32_t false_pgfault_cost; /*! cumulated cost (for all threads) */ 127 uint32_t local_pgfault_cost; /*! cumulated cost (for all threads) */ 128 uint32_t global_pgfault_cost; /*! cumulated cost (for all threads) */ 129 130 vpn_t args_vpn_base; /*! args vseg first page */ 131 vpn_t envs_vpn_base; /*! envs vseg first page */ 132 vpn_t code_vpn_base; /*! code vseg first page */ 133 vpn_t data_vpn_base; /*! data vseg first page */ 134 vpn_t heap_vpn_base; /*! heap zone first page */ 135 136 intptr_t entry_point; /*! main thread entry point */ 137 137 } 138 138 vmm_t; … … 143 143 * - The GPT has been previously created, with the hal_gpt_create() function. 144 144 * - The "kernel" vsegs are previously registered, by the hal_vmm_kernel_update() function. 145 * - The "code" and "data" vsegs ar e registered by the elf_load_process() function.145 * - The "code" and "data" vsegs arlmmmmmme registered by the elf_load_process() function. 146 146 * - The "stack" vsegs are dynamically registered by the thread_user_create() function. 147 147 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall. … … 206 206 207 207 /********************************************************************************************* 208 * This function modifies the size of the vseg identified by <process> and <base> arguments 209 * in all clusters containing a VSL copy, as defined by <new_base> and <new_size> arguments. 210 * This function is called by the sys_munmap() function, and can be called by a thread 211 * running in any cluster, as it uses remote accesses. 212 * It cannot fail, as only vseg registered in VSL copies are updated. 
213 ********************************************************************************************* 214 * @ process : local pointer on process descriptor. 215 * @ base : current vseg base address in user space. 216 * @ new_base : new vseg base. 217 * @ new_size : new vseg size. 218 ********************************************************************************************/ 219 void vmm_global_resize_vseg( struct process_s * process, 220 intptr_t base, 221 intptr_t new_base, 222 intptr_t new_size ); 223 224 /********************************************************************************************* 225 * This function removes the vseg identified by the <process> and <base> arguments from 226 * the VSL and remove all associated PTE entries from the GPT. 227 * This is done in all clusters containing a VMM copy to maintain VMM coherence. 228 * This function can be called by a thread running in any cluster, as it uses the 229 * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters. 230 * It cannot fail, as only vseg registered in VSL copies are deleted. 231 ********************************************************************************************* 232 * @ pid : local pointer on process identifier. 233 * @ base : vseg base address in user space. 234 ********************************************************************************************/ 235 void vmm_global_delete_vseg( struct process_s * process, 236 intptr_t base ); 237 238 /********************************************************************************************* 208 239 * This function modifies one GPT entry identified by the <process> and <vpn> arguments 209 * in all clusters containing a process copy. It is used to maintain coherence in GPT 210 * copies, using remote_write accesses. 211 * It must be called by a thread running in the process owner cluster. 212 * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required. 240 * in all clusters containing a process copy. It maintains coherence in GPT copies, 241 * using remote_write accesses. 213 242 * It cannot fail, as only mapped PTE2 in GPT copies are updated. 214 243 ********************************************************************************************* … … 282 311 /********************************************************************************************* 283 312 * This function removes from the VMM of a process descriptor identified by the <process> 284 * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg. 285 * As it uses local pointers, it must be called by a local thread. 286 * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions. 313 * argument the vseg identified by the <vseg> argument. 314 * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions. 315 * It must be called by a local thread, running in the cluster containing the modified VMM. 316 * Use the RPC_VMM_REMOVE_VSEG if required. 287 317 * It makes a kernel panic if the process is not registered in the local cluster, 288 318 * or if the vseg is not registered in the process VSL. 289 319 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are 290 320 * unmapped from local GPT. Other actions depend on the vseg type: 291 * -Regarding the vseg descriptor release:321 * Regarding the vseg descriptor release: 292 322 * . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list. 293 323 * . 
for STACK the vseg is released to the local stack allocator. 294 324 * . for all other types, the vseg is released to the local kmem. 295 * -Regarding the physical pages release:325 * Regarding the physical pages release: 296 326 * . for KERNEL and FILE, the pages are not released to kmem. 297 327 * . for CODE and STACK, the pages are released to local kmem when they are not COW. 298 328 * . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when 299 329 * the local cluster is the reference cluster. 300 * The lock protecting the VSL must be taken by the caller.301 ********************************************************************************************* 302 * @ process : local pointer on process .303 * @ vseg : local pointer on vseg.330 * The VSL lock protecting the VSL must be taken by the caller. 331 ********************************************************************************************* 332 * @ process : local pointer on process descriptor. 333 * @ vseg : local pointer on target vseg. 304 334 ********************************************************************************************/ 305 335 void vmm_remove_vseg( struct process_s * process, … … 307 337 308 338 /********************************************************************************************* 309 * This function call the vmm_remove vseg() function to remove from the VMM of a local 310 * process descriptor, identified by the <pid> argument the vseg identified by the <vaddr> 311 * virtual address in user space. 312 * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor. 313 ********************************************************************************************* 314 * @ pid : process identifier. 315 * @ vaddr : virtual address in user space. 316 ********************************************************************************************/ 317 void vmm_delete_vseg( pid_t pid, 318 intptr_t vaddr ); 319 320 /********************************************************************************************* 321 * This function removes a given region (defined by a base address and a size) from 322 * the VMM of a given process descriptor. This can modify the number of vsegs: 323 * (a) if the region is not entirely mapped in an existing vseg, it's an error. 324 * (b) if the region has same base and size as an existing vseg, the vseg is removed. 325 * (c) if the removed region cut the vseg in two parts, it is modified. 326 * (d) if the removed region cut the vseg in three parts, it is modified, and a new 327 * vseg is created with same type. 328 * FIXME [AG] this function should be called by a thread running in the reference cluster, 329 * and the VMM should be updated in all process descriptors copies. 330 ********************************************************************************************* 331 * @ process : pointer on process descriptor 332 * @ base : vseg base address 333 * @ size : vseg size (bytes) 334 ********************************************************************************************/ 335 error_t vmm_resize_vseg( struct process_s * process, 336 intptr_t base, 337 intptr_t size ); 338 339 /********************************************************************************************* 340 * This low-level function scan the local VSL in <vmm> to find the unique vseg containing 341 * a given virtual address <vaddr>. 342 * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions. 
343 ********************************************************************************************* 344 * @ vmm : pointer on the process VMM. 345 * @ vaddr : virtual address. 346 * @ return vseg pointer if success / return NULL if not found. 347 ********************************************************************************************/ 348 struct vseg_s * vmm_vseg_from_vaddr( vmm_t * vmm, 349 intptr_t vaddr ); 350 351 /********************************************************************************************* 352 * This function checks that a given virtual address is contained in a registered vseg. 353 * It can be called by any thread running in any cluster: 354 * - if the vseg is registered in the local process VMM, it returns the local vseg pointer. 355 * - if the vseg is missing in local VMM, it uses a RPC to get it from the reference cluster, 356 * register it in local VMM and returns the local vseg pointer, if success. 357 * - it returns an user error if the vseg is missing in the reference VMM, or if there is 358 * not enough memory for a new vseg descriptor in the calling thread cluster. 359 ********************************************************************************************* 360 * @ process : [in] pointer on process descriptor 361 * @ vaddr : [in] virtual address 362 * @ vseg : [out] local pointer on local vseg 363 * @ returns 0 if success / returns -1 if user error (out of segment). 339 * This function resize a local vseg identified by the <process> and <vseg> arguments. 340 * It is called by the vmm_global_resize() function. 341 * It must be called by a local thread, running in the cluster containing the modified VMM. 342 * Use the RPC_VMM_RESIZE_VSEG if required. 343 * It makes a kernel panic if the process is not registered in the local cluster, 344 * or if the vseg is not registered in the process VSL. 345 * The new vseg, defined by the <new_base> and <new_size> arguments must be strictly 346 * included in the target vseg. The target VSL size and base fields are modified in the VSL. 347 * If the new vseg contains less pages than the target vseg, the relevant pages are 348 * removed from the GPT. 349 * The VSL lock protecting the VSL must be taken by the caller. 350 ********************************************************************************************* 351 * @ process : local pointer on process descriptor 352 * @ vseg : local pointer on target vseg 353 * @ new_base : vseg base address 354 * @ new_size : vseg size (bytes) 355 ********************************************************************************************/ 356 void vmm_resize_vseg( struct process_s * process, 357 struct vseg_s * vseg, 358 intptr_t new_base, 359 intptr_t new_size ); 360 361 /********************************************************************************************* 362 * This function checks that a given virtual address <vaddr> in a given <process> is 363 * contained in a registered vseg. It can be called by any thread running in any cluster. 364 * - if the vseg is registered in the local process VSL, it returns the local vseg pointer. 365 * - if the vseg is missing in local VSL, it access directly the reference VSL. 366 * - if the vseg is found in reference VSL, it updates the local VSL and returns this pointer. 367 * It returns an error when the vseg is missing in the reference VMM, or when there is 368 * not enough memory for a new vseg descriptor in the calling thread cluster. 369 * For both the local and the reference VSL, it takes the VSL lock before scanning the VSL. 
370 ********************************************************************************************* 371 * @ process : [in] pointer on process descriptor. 372 * @ vaddr : [in] virtual address. 373 * @ vseg : [out] local pointer on local vseg. 374 * @ returns 0 if success / returns -1 if user error 364 375 ********************************************************************************************/ 365 376 error_t vmm_get_vseg( struct process_s * process, … … 395 406 * This function is called by the generic exception handler in case of WRITE violation event, 396 407 * detected for a given <vpn>. The <process> argument is used to access the relevant VMM. 397 * It returns a kernel panic if VPN is not in a registered vsegor is not mapped.408 * It returns a kernel panic if the faulty VPN is not in a registered vseg, or is not mapped. 398 409 * For a legal mapped vseg there is two cases: 399 410 * 1) If the missing VPN belongs to a private vseg (STACK), it access only the local GPT. -
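As the updated vmm_get_vseg() comment above describes, a local VSL miss is now resolved by scanning the reference VSL directly under its queuelock, instead of going through an RPC to the reference cluster. A minimal caller sketch — the vaddr variable and the surrounding handler are assumed, not taken from the kernel source:

error_t     error;
vseg_t    * vseg;                                // local pointer on local vseg copy
process_t * process = CURRENT_THREAD->process;

error = vmm_get_vseg( process , (intptr_t)vaddr , &vseg );

if( error )          // vaddr out of segment, or no memory for the local vseg copy
{
    // return EINVAL / kill the faulty thread, depending on the caller
}
else                 // vseg is now registered in the local VSL
{
    // vseg->min, vseg->max, vseg->type can be used safely by the caller
}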
trunk/kernel/mm/vseg.h
r625 r640 2 2 * vseg.h - virtual segment (vseg) related operations 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019) 4 * Authors Alain Greiner (2016,2017,2018,2019) 7 5 * 8 6 * Copyright (c) UPMC Sorbonne Universites
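For context on how the two new global functions combine: the vmm.h comments designate sys_munmap() as the caller of vmm_global_resize_vseg(), and the removed vmm_resize_vseg() distinguished four overlap cases between the unmapped region and the target vseg. A hypothetical helper — unmap_user_region() is not a real kernel symbol — reconstructing that dispatch on top of the new API could look like:

static error_t unmap_user_region( process_t * process,
                                  intptr_t    base,
                                  intptr_t    size )
{
    vseg_t * vseg;

    // get the vseg containing the region base address
    if( vmm_get_vseg( process , base , &vseg ) ) return -1;

    intptr_t min = base;
    intptr_t max = base + size;

    // the unmapped region must be included in the vseg
    if( (vseg->min > min) || (vseg->max < max) ) return -1;

    if( (vseg->min == min) && (vseg->max == max) )   // whole vseg unmapped
    {
        vmm_global_delete_vseg( process , vseg->min );
    }
    else if( vseg->min == min )                      // head truncated
    {
        vmm_global_resize_vseg( process , vseg->min , max , vseg->max - max );
    }
    else if( vseg->max == max )                      // tail truncated
    {
        vmm_global_resize_vseg( process , vseg->min , vseg->min , min - vseg->min );
    }
    else                                             // hole in the middle
    {
        // the old code created a second vseg here; under the new API this
        // would require a resize plus a vmm_create_vseg() for the upper part
        return -1;
    }
    return 0;
}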