Changeset 567 for trunk/kernel/mm/vmm.c
Timestamp: Oct 5, 2018, 12:01:52 AM
File: trunk/kernel/mm/vmm.c (1 edited)
Legend: context lines are unmarked; lines added in r567 are prefixed with "+", lines removed from r561 with "-"; a line containing "…" marks elided unchanged code between hunks.
trunk/kernel/mm/vmm.c (r561 → r567)
  #include <printk.h>
  #include <memcpy.h>
- #include <rwlock.h>
+ #include <remote_rwlock.h>
+ #include <remote_queuelock.h>
  #include <list.h>
  #include <xlist.h>
…
  //////////////////////////////////////////////////////////////////////////////////

- extern process_t  process_zero;      // defined in cluster.c file
+ extern process_t  process_zero;      // allocated in cluster.c
…
  #if DEBUG_VMM_INIT
+ thread_t * this  = CURRENT_THREAD;
  uint32_t   cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_INIT )
- printk("\n[DBG] %s : thread %x enter for process %x / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process->pid , cycle );
+ printk("\n[DBG] %s : thread %x in process %x enter for process %x / cycle %d\n",
+ __FUNCTION__ , this->trdid , this->process->pid , process->pid , cycle );
  #endif
…
  vmm->vsegs_nr = 0;
  xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
- remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) );
- assert( (CONFIG_THREAD_MAX_PER_CLUSTER <= 32) ,
- assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREAD_MAX_PER_CLUSTER) <=
+ remote_rwlock_init( XPTR( local_cxy , &vmm->vsegs_lock ) , LOCK_VMM_VSL );
+
+ assert( ((CONFIG_VMM_KENTRY_SIZE + CONFIG_VMM_ARGS_SIZE + CONFIG_VMM_ENVS_SIZE)
+          <= CONFIG_VMM_ELF_BASE) , "UTILS zone too small\n" );
+
+ assert( (CONFIG_THREADS_MAX_PER_CLUSTER <= 32) ,
+         "no more than 32 threads per cluster for a single process\n");
+
+ assert( ((CONFIG_VMM_STACK_SIZE * CONFIG_THREADS_MAX_PER_CLUSTER) <=
+         (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
+         "STACK zone too small\n");

  // register kentry vseg in VSL
…
  vmm->stack_mgr.bitmap   = 0;
  vmm->stack_mgr.vpn_base = CONFIG_VMM_STACK_BASE;
- spinlock_init( &vmm->stack_mgr.lock );
+ busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );

  // initialize MMAP allocator
…
  vmm->mmap_mgr.vpn_size       = CONFIG_VMM_STACK_BASE - CONFIG_VMM_HEAP_BASE;
  vmm->mmap_mgr.first_free_vpn = CONFIG_VMM_HEAP_BASE;
- spinlock_init( &vmm->mmap_mgr.lock );
+ busylock_init( &vmm->mmap_mgr.lock , LOCK_VMM_MMAP );

  uint32_t i;
…
  cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_INIT )
- printk("\n[DBG] %s : thread %x exit for process %x / entry_point = %x / cycle %d\n",
- __FUNCTION__ , CURRENT_THREAD , process->pid , process->vmm.entry_point , cycle );
+ printk("\n[DBG] %s : thread %x in process %x exit / process %x / entry_point = %x / cycle %d\n",
+ __FUNCTION__ , this->trdid , this->process->pid , process->pid , process->vmm.entry_point , cycle );
  #endif
…
  // get lock protecting the vseg list
- remote_rwlock_rd_lock( XPTR( local_cxy , &vmm->vsegs_lock ) );
+ remote_rwlock_rd_acquire( XPTR( local_cxy , &vmm->vsegs_lock ) );
…
  // release the lock
- remote_rwlock_rd_unlock( XPTR( local_cxy , &vmm->vsegs_lock ) );
+ remote_rwlock_rd_release( XPTR( local_cxy , &vmm->vsegs_lock ) );

  }  // vmm_display()

+ ///////////////////////////////////
+ void vmm_vseg_attach( vmm_t  * vmm,
+                       vseg_t * vseg )
+ {
+     // build extended pointer on rwlock protecting VSL
+     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
+
+     // get rwlock in write mode
+     remote_rwlock_wr_acquire( lock_xp );
+
+     // update vseg descriptor
+     vseg->vmm = vmm;
+
+     // add vseg in vmm list
+     xlist_add_last( XPTR( local_cxy , &vmm->vsegs_root ),
+                     XPTR( local_cxy , &vseg->xlist ) );
+
+     // release rwlock in write mode
+     remote_rwlock_wr_release( lock_xp );
+ }
+
+ ///////////////////////////////////
+ void vmm_vseg_detach( vmm_t  * vmm,
+                       vseg_t * vseg )
+ {
+     // build extended pointer on rwlock protecting VSL
+     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
+
+     // get rwlock in write mode
+     remote_rwlock_wr_acquire( lock_xp );
+
+     // update vseg descriptor
+     vseg->vmm = NULL;
+
+     // remove vseg from vmm list
+     xlist_unlink( XPTR( local_cxy , &vseg->xlist ) );
+
+     // release rwlock in write mode
+     remote_rwlock_wr_release( lock_xp );
+ }
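These two new helpers make the VSL rwlock internal to the attach and detach operations. The create/remove-vseg hunks further down show the caller-side effect, condensed here as a before/after sketch (identifiers exactly as they appear in those hunks):

    // r561 : every caller bracketed the raw operation with the VSL lock
    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
    remote_rwlock_wr_lock( lock_xp );
    vseg_attach( vmm , vseg );
    remote_rwlock_wr_unlock( lock_xp );

    // r567 : the lock is taken and released inside the helper
    vmm_vseg_attach( vmm , vseg );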
…
  ///////////////////i//////////////////////////
…
  #endif

+ // check cluster is reference
+ assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
+ "not called in reference cluster\n");

  // get extended pointer on root of process copies xlist in owner cluster
…
  #endif

+ // check cluster is reference
+ assert( (GET_CXY( process->ref_xp ) == local_cxy) ,
+ "local cluster is not process reference cluster\n");

  // get pointer on reference VMM
…
  vseg = GET_PTR( vseg_xp );

+ assert( (GET_CXY( vseg_xp ) == local_cxy) ,
+ "all vsegs in reference VSL must be local\n" );

  // get vseg type, base and size
…
  lock_xp = XPTR( page_cxy , &page_ptr->lock );

+ // take lock protecting "forks" counter
+ remote_busylock_acquire( lock_xp );
+
  // increment "forks"
- remote_spinlock_lock( lock_xp );
  hal_remote_atomic_add( forks_xp , 1 );
- remote_spinlock_unlock( lock_xp );
+
+ // release lock protecting "forks" counter
+ remote_busylock_release( lock_xp );
  }
  }  // end loop on vpn
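This hunk establishes the new locking discipline for the per-page "forks" counter: a remote busylock on the page descriptor brackets the atomic update, replacing the old remote spinlock. A self-contained sketch of the pattern (only lock_xp construction appears in the hunks; forks_xp is assumed to be built the same way from a "forks" field of the page descriptor):

    // extended pointers on the "forks" counter and its lock in the page descriptor
    xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );   // assumed field name
    xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

    remote_busylock_acquire( lock_xp );      // take lock protecting "forks"
    hal_remote_atomic_add( forks_xp , 1 );   // increment "forks"
    remote_busylock_release( lock_xp );      // release lock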
…
  // initialize the lock protecting the child VSL
- remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ) );
+ remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsegs_lock ) , LOCK_VMM_STACK );

  // initialize the child VSL as empty
…
  parent_root_xp = XPTR( parent_cxy , &parent_vmm->vsegs_root );

- // take the lock protecting the parent VSL
- remote_rwlock_rd_lock( parent_lock_xp );
+ // take the lock protecting the parent VSL in read mode
+ remote_rwlock_rd_acquire( parent_lock_xp );

  // loop on parent VSL xlist
…
  // get vseg type
- type = hal_remote_lw( XPTR( parent_cxy , &parent_vseg->type ) );
+ type = hal_remote_l32( XPTR( parent_cxy , &parent_vseg->type ) );

  #if DEBUG_VMM_FORK_COPY
…
  printk("\n[DBG] %s : thread %x found parent vseg %s / vpn_base = %x / cycle %d\n",
  __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
- hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
+ hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
  #endif
…
  // register child vseg in child VSL
- vseg_attach( child_vmm , child_vseg );
+ vmm_vseg_attach( child_vmm , child_vseg );

  #if DEBUG_VMM_FORK_COPY
…
  printk("\n[DBG] %s : thread %x copied vseg %s / vpn_base = %x to child VSL / cycle %d\n",
  __FUNCTION__ , CURRENT_THREAD , vseg_type_str(type),
- hal_remote_lw( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
+ hal_remote_l32( XPTR( parent_cxy , &parent_vseg->vpn_base ) ) , cycle );
  #endif
…
  lock_xp = XPTR( page_cxy , &page_ptr->lock );

+ // get lock protecting "forks" counter
+ remote_busylock_acquire( lock_xp );
+
  // increment "forks"
- remote_spinlock_lock( lock_xp );
  hal_remote_atomic_add( forks_xp , 1 );
- remote_spinlock_unlock( lock_xp );
+
+ // release lock protecting "forks" counter
+ remote_busylock_release( lock_xp );

  #if DEBUG_VMM_FORK_COPY
…
  }   // end loop on vsegs

- // release the parent vsegs lock
- remote_rwlock_rd_unlock( parent_lock_xp );
+ // release the parent VSL lock in read mode
+ remote_rwlock_rd_release( parent_lock_xp );

  // initialize child GPT (architecture specific)
…
  // get extended pointer on VSL root and VSL lock
  xptr_t   root_xp = XPTR( local_cxy , &vmm->vsegs_root );
- xptr_t   lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
- // get lock protecting vseg list
- remote_rwlock_wr_lock( lock_xp );

  // remove all user vsegs registered in VSL
…
  {
  // get pointer on first vseg in VSL
- vseg_xp = XLIST_FIRST_ELEMENT( root_xp , vseg_t , xlist );
+ vseg_xp = XLIST_FIRST( root_xp , vseg_t , xlist );
  vseg    = GET_PTR( vseg_xp );
…
  // remove vseg from VSL
- vseg_detach( vseg );
+ vmm_vseg_detach( vmm , vseg );

  // release memory allocated to vseg descriptor
…
  }

- // release lock protecting VSL
- remote_rwlock_wr_unlock( lock_xp );
-
  // remove all vsegs from zombi_lists in MMAP allocator
  uint32_t i;
…
  __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size );
  #endif
- vseg_detach( vseg );
+ vmm_vseg_detach( vmm , vseg );
  vseg_free( vseg );
…
  // get lock on stack allocator
- spinlock_lock( &mgr->lock );
+ busylock_acquire( &mgr->lock );

  // get first free slot index in bitmap
…
  if( (index < 0) || (index > 31) )
  {
- spinlock_unlock( &mgr->lock );
- return ENOMEM;
+ busylock_release( &mgr->lock );
+ return 0xFFFFFFFF;
  }
…
  // release lock on stack allocator
- spinlock_unlock( &mgr->lock );
+ busylock_release( &mgr->lock );

  // returns vpn_base, vpn_size (one page non allocated)
…
  // get lock on mmap allocator
- spinlock_lock( &mgr->lock );
+ busylock_acquire( &mgr->lock );

  // get vseg from zombi_list or from mmap zone
…
  // release lock on mmap allocator
- spinlock_unlock( &mgr->lock );
+ busylock_release( &mgr->lock );

  // returns vpn_base, vpn_size
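Across both local allocators the migration is identical: spinlock_init/lock/unlock become busylock_init/acquire/release, and every lock is now created with an identifier (LOCK_VMM_VSL, LOCK_VMM_STACK, LOCK_VMM_MMAP in this file). A condensed sketch of the lifecycle, using only calls that appear in the hunks:

    // creation : the lock now carries a type tag
    busylock_init( &vmm->stack_mgr.lock , LOCK_VMM_STACK );

    // typical critical section on the stack-slot bitmap
    busylock_acquire( &vmm->stack_mgr.lock );
    bitmap_clear( &vmm->stack_mgr.bitmap , index );   // index of the released slot
    busylock_release( &vmm->stack_mgr.lock );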
…
  // attach vseg to VSL
- xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
- remote_rwlock_wr_lock( lock_xp );
- vseg_attach( vmm , vseg );
- remote_rwlock_wr_unlock( lock_xp );
+ vmm_vseg_attach( vmm , vseg );

  #if DEBUG_VMM_CREATE_VSEG
…
  // detach vseg from VSL
- xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
- remote_rwlock_wr_lock( lock_xp );
- vseg_detach( vseg );
- remote_rwlock_wr_unlock( lock_xp );
+ vmm_vseg_detach( vmm , vseg );

  // release the stack slot to VMM stack allocator if STACK type
…
  // update stacks_bitmap
- spinlock_lock( &mgr->lock );
+ busylock_acquire( &mgr->lock );
  bitmap_clear( &mgr->bitmap , index );
- spinlock_unlock( &mgr->lock );
+ busylock_release( &mgr->lock );
  }
…
  // update zombi_list
- spinlock_lock( &mgr->lock );
+ busylock_acquire( &mgr->lock );
  list_add_first( &mgr->zombi_list[index] , &vseg->zlist );
- spinlock_unlock( &mgr->lock );
+ busylock_release( &mgr->lock );
  }
…
  #endif

+ // check small page
+ assert( (attr & GPT_SMALL) ,
+ "an user vseg must use small pages" );

  // unmap GPT entry in all GPT copies
…
  // handle pending forks counter if
  // 1) not identity mapped
- // 2) running in reference cluster
+ // 2) reference cluster
  if( ((vseg->flags & VSEG_IDENT) == 0) &&
  (GET_CXY( process->ref_xp ) == local_cxy) )
…
  lock_xp = XPTR( page_cxy , &page_ptr->lock );

- // get lock protecting page descriptor
- remote_spinlock_lock( lock_xp );
-
  // get pending forks counter
- forks = hal_remote_lw( forks_xp );
+ forks = hal_remote_l32( forks_xp );

  if( forks )   // decrement pending forks counter
…
  }
  }
-
- // release lock protecting page descriptor
- remote_spinlock_unlock( lock_xp );
  }
  }
…
  // get lock protecting the VSL
- remote_rwlock_rd_lock( lock_xp );
+ remote_rwlock_rd_acquire( lock_xp );

  // scan the list of vsegs in VSL
…
  {
  // return success
- remote_rwlock_rd_unlock( lock_xp );
+ remote_rwlock_rd_release( lock_xp );
  return vseg;
  }
…
  // return failure
- remote_rwlock_rd_unlock( lock_xp );
+ remote_rwlock_rd_release( lock_xp );
  return NULL;
…
  // get lock protecting VSL
- remote_rwlock_wr_lock( lock_xp );
+ remote_rwlock_wr_acquire( lock_xp );

  if( (vseg->min > addr_min) || (vseg->max < addr_max) )   // region not included in vseg
…
  // release VMM lock
- remote_rwlock_wr_unlock( lock_xp );
+ remote_rwlock_wr_release( lock_xp );

  return error;
…
  // register local vseg in local VMM
- vseg_attach( &process->vmm , vseg );
+ vmm_vseg_attach( &process->vmm , vseg );
  }
…
  uint32_t flags = vseg->flags;

- assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
+ // check vseg type
+ assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );

  if( flags & VSEG_DISTRIB )   // distributed => cxy depends on vpn LSB
…
  }
  page_cxy = ( x << y_width ) + y;
+
+ // if ( LOCAL_CLUSTER->valid[x][y] == false ) page_cxy = cluster_random_select();
+
  }
  else   // other cases => cxy specified in vseg
…
  xptr_t mapper_xp = vseg->mapper_xp;

+ assert( (mapper_xp != XPTR_NULL),
+ "mapper not defined for a FILE vseg\n" );

  // get mapper cluster and local pointer
…
  xptr_t mapper_xp = vseg->mapper_xp;

+ assert( (mapper_xp != XPTR_NULL),
+ "mapper not defined for a CODE or DATA vseg\n" );

  // get mapper cluster and local pointer
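The remote rwlock renaming in the hunks above (rd/wr _lock/_unlock to _acquire/_release) follows one rule: lookups take the VSL lock in read mode and release it on every exit path, while structural updates take it in write mode. A condensed sketch of the lookup side, with the loop body elided:

    xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );

    remote_rwlock_rd_acquire( lock_xp );   // read mode : lookups may run concurrently
    // ... scan the list of vsegs in VSL ; on a match :
    //     remote_rwlock_rd_release( lock_xp ); return vseg;
    remote_rwlock_rd_release( lock_xp );   // no match : release before returning
    return NULL;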
…
  __FUNCTION__, this, vpn, elf_offset );
  #endif

  // compute extended pointer on page base
  xptr_t base_xp = ppm_page2base( page_xp );
…
  __FUNCTION__, this, vpn );
  #endif

  if( GET_CXY( page_xp ) == local_cxy )
  {
…
  error_t error;

- thread_t * this = CURRENT_THREAD;
-
  #if DEBUG_VMM_GET_PTE
+ thread_t * this  = CURRENT_THREAD;
  uint32_t cycle   = (uint32_t)hal_get_cycles();
  if( DEBUG_VMM_GET_PTE < cycle )
…
  &vseg );

+ // vseg has been checked by the vmm_handle_page_fault() function
+ assert( (vseg != NULL) , "vseg undefined / vpn %x\n");

  if( cow )   //////////////// copy_on_write request //////////////////////
…
  hal_gpt_get_pte( &vmm->gpt , vpn , &old_attr , &old_ppn );

+ assert( (old_attr & GPT_MAPPED),
+ "PTE unmapped for a COW exception / vpn %x\n" );

  #if( DEBUG_VMM_GET_PTE & 1 )
…
  xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );

- // take lock protecting page descriptor
- remote_spinlock_lock( lock_xp );
+ // take lock protecting "forks" counter
+ remote_busylock_acquire( lock_xp );

  // get number of pending forks in page descriptor
- uint32_t forks = hal_remote_lw( forks_xp );
+ uint32_t forks = hal_remote_l32( forks_xp );

  if( forks )   // pending fork => allocate a new page, copy old to new
…
  }

- // release lock protecting page descriptor
- remote_spinlock_unlock( lock_xp );
+ // release lock protecting "forks" counter
+ remote_busylock_release( lock_xp );

  // build new_attr : reset COW and set WRITABLE,
…
  type = vseg->type;

- // get reference process cluster and local pointer
+ // get relevant process cluster and local pointer
  // for private vsegs (CODE and DATA type),
- // the reference is the local process descriptor.
+ // the relevant process descriptor is local.
  if( (type == VSEG_TYPE_STACK) || (type == VSEG_TYPE_CODE) )
  {
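In the copy-on-write path the busylock now also covers the decision taken on the "forks" counter. A simplified sketch of that logic as the hunks above describe it; the decrement on the pending-fork branch is an assumption based on the code's own comments, and the page allocation and copy details are elided:

    remote_busylock_acquire( lock_xp );            // take lock protecting "forks"

    uint32_t forks = hal_remote_l32( forks_xp );   // pending forks ?

    if( forks )    // pending fork => allocate a new page, copy old to new
    {
        // ... allocate a new page , copy the old page content ...
        hal_remote_atomic_add( forks_xp , -1 );    // assumed decrement of "forks"
    }

    remote_busylock_release( lock_xp );            // release lock protecting "forks"

    // then build new_attr : reset COW and set WRITABLE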