Changeset 623 for trunk/kernel/mm
- Timestamp: Mar 6, 2019, 4:37:15 PM
- Location: trunk/kernel/mm
- Files: 8 edited
trunk/kernel/mm/mapper.c
r614 → r623. Adds the new mapper_sync() function (write-back of all dirty pages of a mapper), simplifies the debug guards in mapper_handle_miss(), and adds a debug byte-dump in mapper_move_user():

```diff
--- trunk/kernel/mm/mapper.c (r614)
+++ trunk/kernel/mm/mapper.c (r623)
  *
  * Authors   Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

@@ mapper_handle_miss() : entry debug guard activated @@
      vfs_inode_t * inode = mapper->inode;
      vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
-//    if( DEBUG_MAPPER_HANDLE_MISS < cycle )
-//    if( (page_id == 1) && (cycle > 10000000) )
+     if( DEBUG_MAPPER_HANDLE_MISS < cycle )
      printk("\n[%s] enter for page %d in <%s> / cycle %d",
      __FUNCTION__, page_id, name, cycle );

@@ mapper_handle_miss() : exit debug guard activated @@
  #if DEBUG_MAPPER_HANDLE_MISS
      cycle = (uint32_t)hal_get_cycles();
-//    if( DEBUG_MAPPER_HANDLE_MISS < cycle )
-//    if( (page_id == 1) && (cycle > 10000000) )
+     if( DEBUG_MAPPER_HANDLE_MISS < cycle )
      printk("\n[%s] exit for page %d in <%s> / ppn %x / cycle %d",
      __FUNCTION__, page_id, name, ppm_page2ppn( *page_xp ), cycle );

@@ mapper_move_user() : debug dump of the moved bytes @@
      ppm_page_do_dirty( page_xp );
      hal_copy_from_uspace( map_ptr , buf_ptr , page_count );
+
+     putb(" in mapper_move_user()" , map_ptr , page_count );
+
  }

@@ new mapper_sync() function @@
  } // end mapper_remote_set_32()
+
+/////////////////////////////////////////
+error_t mapper_sync( mapper_t * mapper )
+{
+    page_t   * page;        // local pointer on current page descriptor
+    xptr_t     page_xp;     // extended pointer on current page descriptor
+    grdxt_t  * rt;          // pointer on radix_tree descriptor
+    uint32_t   start_key;   // start page index in mapper
+    uint32_t   found_key;   // current page index in mapper
+    error_t    error;
+
+#if DEBUG_MAPPER_SYNC
+    thread_t * this  = CURRENT_THREAD;
+    uint32_t   cycle = (uint32_t)hal_get_cycles();
+    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
+    vfs_inode_get_name( XPTR( local_cxy , mapper->inode ) , name );
+#endif
+
+    // get pointer on radix tree
+    rt = &mapper->rt;
+
+    // initialise loop variable
+    start_key = 0;
+
+    // scan radix-tree until last page found
+    while( 1 )
+    {
+        // get page descriptor from radix tree
+        page = (page_t *)grdxt_get_first( rt , start_key , &found_key );
+
+        if( page == NULL ) break;
+
+        assert( (page->index == found_key), __FUNCTION__, "wrong page descriptor index" );
+        assert( (page->order == 0), __FUNCTION__, "mapper page order must be 0" );
+
+        // build extended pointer on page descriptor
+        page_xp = XPTR( local_cxy , page );
+
+        // synchronize page if dirty
+        if( (page->flags & PG_DIRTY) != 0 )
+        {
+
+#if DEBUG_MAPPER_SYNC
+if( cycle > DEBUG_MAPPER_SYNC )
+printk("\n[%s] thread[%x,%x] synchronize page %d of <%s> to device\n",
+__FUNCTION__, this->process->pid, this->trdid, page->index, name );
+#endif
+            // copy page to file system
+            error = vfs_fs_move_page( page_xp , IOC_WRITE );
+
+            if( error )
+            {
+                printk("\n[ERROR] in %s : cannot synchronize dirty page %d\n",
+                __FUNCTION__, page->index );
+                return -1;
+            }
+
+            // remove page from PPM dirty list
+            ppm_page_undo_dirty( page_xp );
+        }
+        else
+        {
+
+#if DEBUG_MAPPER_SYNC
+if( cycle > DEBUG_MAPPER_SYNC )
+printk("\n[%s] thread[%x,%x] skip page %d for <%s>\n",
+__FUNCTION__, this->process->pid, this->trdid, page->index, name );
+#endif
+        }
+
+        // update loop variable
+        start_key = page->index + 1;
+    } // end while
+
+    return 0;
+
+} // end mapper_sync()
```
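Since mapper_sync() must run in the mapper cluster, a caller first has to decide between a direct call and the RPC path mentioned in the new mapper.h documentation. A hedged caller-side sketch (not part of this changeset): the vfs_sync_inode() wrapper and the rpc_mapper_sync_client() stub are illustrative assumptions; only mapper_sync() itself is defined above.

```c
error_t vfs_sync_inode( xptr_t inode_xp )
{
    error_t       error;
    cxy_t         inode_cxy = GET_CXY( inode_xp );   // cluster containing the mapper
    vfs_inode_t * inode_ptr = GET_PTR( inode_xp );

    // get local pointer on the mapper (remote read of the inode field)
    mapper_t * mapper = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) );

    if( inode_cxy == local_cxy )       // mapper is local : direct call
    {
        error = mapper_sync( mapper );
    }
    else                               // mapper is remote : delegate to the mapper cluster
    {
        rpc_mapper_sync_client( inode_cxy , mapper , &error );   // assumed RPC stub
    }
    return error;
}
```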
trunk/kernel/mm/mapper.h
r614 → r623. Adds the mapper_sync() prototype with its documentation, and updates the header comments (reader locality, the vfs_fs_move_page() name, the TODO target directory):

```diff
--- trunk/kernel/mm/mapper.h (r614)
+++ trunk/kernel/mm/mapper.h (r623)
  *
  * Authors   Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

@@ header comment : readers / writers / miss handling @@
  * "readers", and only one "writer".
  * - A "reader" thread, calling the mapper_remote_get_page() function to get a page
- *   descriptor pointer from the page index in file, can be remote (running in any cluster).
+ *   descriptor pointer from the page index in file, can be running in any cluster.
  * - A "writer" thread, calling the mapper_handle_miss() function to handle a page miss
  *   must be local (running in the mapper cluster).
- * - The vfs_mapper_move_page() function access the file system to handle a mapper miss,
+ * - The vfs_fs_move_page() function access the file system to handle a mapper miss,
  *   or update a dirty page on device.
  * - The vfs_mapper_load_all() function is used to load all pages of a directory
  ...
  * TODO : the mapper being only used to implement the VFS cache(s), the mapper.c
- *        and mapper.h file should be transfered to the vfs directory.
+ *        and mapper.h file should be transfered to the fs directory.

@@ new mapper_sync() prototype and documentation @@
+/*******************************************************************************************
+ * This function scans all pages present in the mapper identified by the <mapper> argument,
+ * and synchronizes all pages marked as dirty on disk.
+ * These pages are unmarked and removed from the local PPM dirty_list.
+ * This function must be called by a local thread running in same cluster as the mapper.
+ * A remote thread must call the RPC_MAPPER_SYNC function.
+ *******************************************************************************************
+ * @ mapper  : [in] local pointer on local mapper.
+ * @ returns 0 if success / return -1 if error.
+ ******************************************************************************************/
+error_t mapper_sync( mapper_t * mapper );
+
 /*******************************************************************************************
  * This debug function displays the content of a given page of a given mapper.
  * - the mapper is identified by the <mapper_xp> argument.
```
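The "reader" rule above means any thread, in any cluster, may fetch a page from a possibly remote mapper through extended pointers. A hedged sketch of that access pattern; the ppm_page2base() conversion is assumed from ppm.h in this tree and is not part of this changeset:

```c
uint32_t read_first_word( xptr_t mapper_xp , uint32_t page_id )
{
    // get extended pointer on the page descriptor (the mapper cluster
    // handles the miss internally if the page is not cached yet)
    xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return 0;

    // convert the page descriptor to the page base buffer,
    // then do a remote read of the first 32-bit word
    xptr_t base_xp = ppm_page2base( page_xp );
    return hal_remote_l32( base_xp );
}
```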
trunk/kernel/mm/page.h
r612 → r623:

```diff
--- trunk/kernel/mm/page.h (r612)
+++ trunk/kernel/mm/page.h (r623)
 #define PG_INIT      0x0001     // page descriptor has been initialised
 #define PG_RESERVED  0x0002     // cannot be allocated by PPM
-#define PG_FREE      0x0004     // page can be allocated by PPM
+#define PG_FREE      0x0004     // page not yet allocated by PPM
 #define PG_DIRTY     0x0040     // page has been written
 #define PG_COW       0x0080     // page is copy-on-write
```
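The new comment pins down the PG_FREE semantics: the flag marks a page that has never been handed out by the PPM, not one that is merely allocatable. A hedged illustration of combining these flags; the page_is_flag() helper is assumed from the rest of page.h and is not part of this changeset:

```c
// sketch : a page may be handed out by the PPM only if it is still free
// and not part of the area reserved by the bootloader
static inline bool_t page_can_be_allocated( page_t * page )
{
    return ( page_is_flag( page , PG_FREE ) &&                 // never allocated yet
             (page_is_flag( page , PG_RESERVED ) == false) );  // not reserved
}
```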
trunk/kernel/mm/ppm.h
r611 → r623. Comment-only changes: the description of the three physical-memory sections is made precise (local PPNs, small pages), "save" becomes "synchronize" for the dirty list, and two "protcting" typos are fixed:

```diff
--- trunk/kernel/mm/ppm.h (r611)
+++ trunk/kernel/mm/ppm.h (r623)
  *
  * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
- *           Alain Greiner (2016,2017,2018)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

@@ header comment : physical memory organisation @@
  * This structure defines the Physical Pages Manager in a cluster.
  * In each cluster, the physical memory bank starts at local physical address 0 and
- * contains an integer number of pages, defined by the <pages_nr> field in the
+ * contains an integer number of small pages, defined by the <pages_nr> field in the
  * boot_info structure. It is split in three parts:
  *
  * - the "kernel_code" section contains the kernel code, loaded by the boot-loader.
- *   It starts at PPN = 0 and the size is defined by the <pages_offset> field in the
- *   boot_info structure.
- * - the "pages_tbl" section contains the physical page descriptors array. It starts
- *   at PPN = pages_offset, and it contains one entry per small physical page in cluster.
+ *   It starts at local PPN = 0 and the size is defined by the <pages_offset> field
+ *   in the boot_info structure.
+ * - the local "pages_tbl" section contains the physical page descriptors array.
+ *   It starts at local PPN = pages_offset, and it contains one entry per small page.
  *   It is created and initialized by the hal_ppm_create() function.
  * - The "kernel_heap" section contains all physical pages that are not in the
- *   kernel_code and pages_tbl sections, and that have not been reserved by the
- *   architecture specific bootloader. The reserved pages are defined in the boot_info
- *   structure.
+ *   "kernel_code" and "pages_tbl" sections, and that have not been reserved.
+ *   The reserved pages are defined in the boot_info structure.

@@ header comment : dirty pages list @@
  * Another service is to register the dirty pages in a specific dirty_list, that is
- * also rooted in the PPM, in order to be able to save all dirty pages on disk.
+ * also rooted in the PPM, in order to be able to synchronize all dirty pages on disk.
  * This dirty list is protected by a specific remote_queuelock, because it can be
  * modified by a remote thread, but it contains only local pages.

@@ ppm_page_do_dirty() documentation @@
  * . if page already dirty => do nothing
  * . if page not dirty => set the PG_DIRTY flag and register page in PPM dirty list.
- * - it releases the busylock protcting the page flags.
+ * - it releases the busylock protecting the page flags.
  * - it releases the queuelock protecting the PPM dirty_list.

@@ ppm_page_undo_dirty() documentation @@
  * . if page not dirty => do nothing
  * . if page dirty => reset the PG_DIRTY flag and remove page from PPM dirty list.
- * - it releases the busylock protcting the page flags.
+ * - it releases the busylock protecting the page flags.
  * - it releases the queuelock protecting the PPM dirty_list.
```
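The dirty-list service pairs with the new mapper_sync(): a writer registers the page before modifying it, and the sync path later writes it back and unregisters it. A hedged sketch tying the two operations together, following the order visible in mapper_move_user() above; the page is assumed local (as mapper_sync() requires), and the buffer address computation is an assumption for brevity:

```c
void write_page_then_flush( mapper_t * mapper , xptr_t page_xp ,
                            void * u_buf , uint32_t count )
{
    // register the page in the local PPM dirty list (sets PG_DIRTY)
    ppm_page_do_dirty( page_xp );

    // copy the user data into the mapper page, as mapper_move_user() does
    void * map_ptr = GET_PTR( ppm_page2base( page_xp ) );   // local page assumed
    hal_copy_from_uspace( map_ptr , u_buf , count );

    // later : flush every dirty page of this mapper to disk;
    // mapper_sync() clears PG_DIRTY via ppm_page_undo_dirty()
    mapper_sync( mapper );
}
```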
trunk/kernel/mm/vmm.c
r621 → r623. The largest diff of the changeset: the "kentry" identity-mapped vseg disappears, kernel vsegs are now installed in user GPTs by hal_vmm_kernel_update(), several error paths actually return, and vmm_delete_vseg() / vmm_resize_vseg() are reworked:

```diff
--- trunk/kernel/mm/vmm.c (r621)
+++ trunk/kernel/mm/vmm.c (r623)

@@ vmm_init() : the kentry vseg is no longer registered in the VSL @@
  {
      error_t   error;
-     vseg_t  * vseg_kentry;
      vseg_t  * vseg_args;
      vseg_t  * vseg_envs;
  ...
      (CONFIG_VMM_VSPACE_SIZE - CONFIG_VMM_STACK_BASE)) ,
      "STACK zone too small\n");
-
-     // register kentry vseg in VSL
-     base = CONFIG_VMM_KENTRY_BASE << CONFIG_PPM_PAGE_SHIFT;
-     size = CONFIG_VMM_KENTRY_SIZE << CONFIG_PPM_PAGE_SHIFT;
-
-     vseg_kentry = vmm_create_vseg( process,
-                                    VSEG_TYPE_CODE,
-                                    base,
-                                    size,
-                                    0,            // file_offset unused
-                                    0,            // file_size unused
-                                    XPTR_NULL,    // mapper_xp unused
-                                    local_cxy );
-
-     if( vseg_kentry == NULL )
-     {
-         printk("\n[ERROR] in %s : cannot register kentry vseg\n", __FUNCTION__ );
-         return -1;
-     }
-
-     vmm->kent_vpn_base = base;

      // register args vseg in VSL

@@ vmm_init() : GPT creation errors abort, kernel vsegs mapped by the HAL @@
      if( error )
-         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
+     {
+         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
+         return -1;
+     }

      // initialize GPT lock
      remote_rwlock_init( XPTR( local_cxy , &vmm->gpt_lock ) , LOCK_VMM_GPT );

-     // architecture specific GPT initialisation
-     // (For TSAR, identity map the kentry_vseg)
-     error = hal_vmm_init( vmm );
-
-     if( error )
-         printk("\n[ERROR] in %s : cannot initialize GPT\n", __FUNCTION__ );
+     // update process VMM with kernel vsegs
+     error = hal_vmm_kernel_update( process );
+
+     if( error )
+     {
+         printk("\n[ERROR] in %s : cannot update GPT for kernel vsegs\n", __FUNCTION__ );
+         return -1;
+     }

      // initialize STACK allocator

@@ vseg release : kernel vsegs keep their descriptor @@
      }

-     // release physical memory allocated for vseg descriptor if no MMAP type
-     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) )
+     // release physical memory allocated for vseg if no MMAP and no kernel type
+     if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) &&
+         (type != VSEG_TYPE_KCODE) && (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
      {
          vseg_free( vseg );

@@ vmm_fork_copy() : kernel vsegs are not copied in the child VSL @@
      child_vmm->vsegs_nr = 0;

-     // create child GPT
+     // create the child GPT
      error = hal_gpt_create( &child_vmm->gpt );
  ...
  #endif

-     // all parent vsegs - but STACK - must be copied in child VSL
-     if( type != VSEG_TYPE_STACK )
+     // all parent vsegs - but STACK and kernel vsegs - must be copied in child VSL
+     if( (type != VSEG_TYPE_STACK) && (type != VSEG_TYPE_KCODE) &&
+         (type != VSEG_TYPE_KDATA) && (type != VSEG_TYPE_KDEV) )
      {
          // allocate memory for a new child vseg
  ...
      remote_rwlock_rd_release( parent_lock_xp );

-     // initialize child GPT (architecture specific)
-     // => For TSAR, identity map the kentry_vseg
-     error = hal_vmm_init( child_vmm );
+     // update child VMM with kernel vsegs
+     error = hal_vmm_kernel_update( child_process );

      if( error )
      {
-         printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );
+         printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );
          return -1;
      }

@@ vmm_create_vseg() : comment update @@
          base = vpn_base << CONFIG_PPM_PAGE_SHIFT;
      }
-     else    // VSEG_TYPE_DATA or VSEG_TYPE_CODE
+     else    // VSEG_TYPE_DATA, VSEG_TYPE_CODE or KERNEL vseg
      {
          uint32_t vpn_min = base >> CONFIG_PPM_PAGE_SHIFT;

@@ vmm_delete_vseg() : explicit errors, kernel vsegs keep their pages @@
      xptr_t    lock_xp;   // extended pointer on lock protecting forks counter
      uint32_t  forks;     // actual number of pending forks
+     uint32_t  type;      // vseg type
  ...
  #if DEBUG_VMM_DELETE_VSEG
  ...
      process = cluster_get_local_process_from_pid( pid );

-     if( process == NULL ) return;
+     if( process == NULL )
+     {
+         printk("\n[ERROR] in %s : cannot get local process descriptor\n",
+         __FUNCTION__ );
+         return;
+     }

      // get pointers on local process VMM and GPT
  ...
      vseg = vmm_vseg_from_vaddr( vmm , vaddr );

-     if( vseg == NULL ) return;
-
-     // loop to invalidate all vseg PTEs in GPT
+     if( vseg == NULL )
+     {
+         printk("\n[ERROR] in %s : cannot get vseg descriptor\n",
+         __FUNCTION__ );
+         return;
+     }
+
+     // get relevant vseg infos
+     type    = vseg->type;
      vpn_min = vseg->vpn_base;
      vpn_max = vpn_min + vseg->vpn_size;
+
+     // loop to invalidate all vseg PTEs in GPT
      for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
      {
  ...
      printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, vseg_type_str(vseg->type) );
  #endif
-
-         // check small page
-         assert( (attr & GPT_SMALL) , "an user vseg must use small pages" );
-
          // unmap GPT entry in local GPT
          hal_gpt_reset_pte( gpt , vpn );

-         // handle pending forks counter if
-         // 1) not identity mapped
-         // 2) reference cluster
-         if( ((vseg->flags & VSEG_IDENT) == 0) &&
-             (GET_CXY( process->ref_xp ) == local_cxy) )
+         // the allocated page is not released to KMEM for kernel vseg
+         if( (type != VSEG_TYPE_KCODE) &&
+             (type != VSEG_TYPE_KDATA) &&
+             (type != VSEG_TYPE_KDEV ) )
          {
+
+// FIXME This code must be completely re-written, as the actual release must depend on
+//       - the vseg type
+//       - the reference cluster
+//       - the page refcount and/or the forks counter
+
              // get extended pointer on physical page descriptor
              page_xp = ppm_ppn2page( ppn );
  ...
              lock_xp  = XPTR( page_cxy , &page_ptr->lock );

+             // get the lock protecting the page
              remote_busylock_acquire( lock_xp );
+
              // get pending forks counter
              forks = hal_remote_l32( forks_xp );
+
              if( forks )  // decrement pending forks counter
              {
  ...
  #endif
              }
+
+             // release the lock protecting the page
              remote_busylock_release( lock_xp );
          }

@@ vmm_vseg_from_vaddr() : cosmetic @@
      // return failure
      remote_rwlock_rd_release( lock_xp );
+
      return NULL;

@@ vmm_resize_vseg() : debug instrumentation, explicit errors, no VSL write lock @@
      vpn_t   vpn_max;

+#if DEBUG_VMM_RESIZE_VSEG
+uint32_t   cycle = (uint32_t)hal_get_cycles();
+thread_t * this  = CURRENT_THREAD;
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
+#endif
+
      // get pointer on process VMM
      vmm_t * vmm = &process->vmm;
  ...
      vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );

-     if( vseg == NULL) return EINVAL;
-
-     // get extended pointer on VSL lock
-     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock );
-
-     // get lock protecting VSL
-     remote_rwlock_wr_acquire( lock_xp );
-
+     if( vseg == NULL)
+     {
+         printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
+         __FUNCTION__, base , size );
+         return -1;
+     }
+
+     // resize depends on unmapped region base and size
      if( (vseg->min > addr_min) || (vseg->max < addr_max) )         // not included in vseg
      {
+         printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
+         __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
+
          error = -1;
      }
      else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
      {
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
+__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
+#endif
          vmm_delete_vseg( process->pid , vseg->min );
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] deleted vseg\n",
+__FUNCTION__, this->process->pid, this->trdid );
+#endif
          error = 0;
      }
      else if( vseg->min == addr_min )                               // vseg must be resized
      {
-         // update vseg base address
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
+__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
+#endif
+         // update vseg min address
          vseg->min = addr_max;
  ...
          vseg->vpn_base = vpn_min;
          vseg->vpn_size = vpn_max - vpn_min + 1;
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] changed vseg_min\n",
+__FUNCTION__, this->process->pid, this->trdid );
+#endif
          error = 0;
      }
      else if( vseg->max == addr_max )                               // vseg must be resized
      {
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
+__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
+#endif
          // update vseg max address
          vseg->max = addr_min;
  ...
          vseg->vpn_base = vpn_min;
          vseg->vpn_size = vpn_max - vpn_min + 1;
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] changed vseg_max\n",
+__FUNCTION__, this->process->pid, this->trdid );
+#endif
          error = 0;
+
      }
      else                                                           // vseg cut in three regions
      {
+
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
+__FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
+#endif
          // resize existing vseg
          vseg->max = addr_min;
  ...
          vseg->cxy );

-         if( new == NULL ) error = EINVAL;
+#if( DEBUG_VMM_RESIZE_VSEG & 1 )
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] replaced vseg by two small vsegs\n",
+__FUNCTION__, this->process->pid, this->trdid );
+#endif
+
+         if( new == NULL ) error = -1;
          else              error = 0;
      }

-     // release VMM lock
-     remote_rwlock_wr_release( lock_xp );
+#if DEBUG_VMM_RESIZE_VSEG
+if( DEBUG_VMM_RESIZE_VSEG < cycle )
+printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
+__FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
+#endif

      return error;
```
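The rewritten vmm_resize_vseg() distinguishes five outcomes when an unmapped region [addr_min, addr_max[ is compared against a vseg [min, max[. A compact sketch of that case analysis, as a hypothetical helper with illustrative names (the changeset itself keeps the logic inline):

```c
typedef enum
{
    RESIZE_ERROR,      // region not fully included in the vseg
    RESIZE_DELETE,     // region equals the vseg   : vmm_delete_vseg()
    RESIZE_TRIM_MIN,   // region at the vseg base  : vseg->min = addr_max
    RESIZE_TRIM_MAX,   // region at the vseg top   : vseg->max = addr_min
    RESIZE_SPLIT,      // region in the middle     : shrink + vmm_create_vseg()
}
resize_case_t;

// hypothetical classifier mirroring the if / else chain above
resize_case_t resize_classify( intptr_t min      , intptr_t max ,       // vseg bounds
                               intptr_t addr_min , intptr_t addr_max )  // unmapped region
{
    if( (min > addr_min)  || (max < addr_max) )   return RESIZE_ERROR;
    if( (min == addr_min) && (max == addr_max) )  return RESIZE_DELETE;
    if( min == addr_min )                         return RESIZE_TRIM_MIN;
    if( max == addr_max )                         return RESIZE_TRIM_MAX;
    return RESIZE_SPLIT;
}
```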
trunk/kernel/mm/vmm.h
r614 → r623:

```diff
--- trunk/kernel/mm/vmm.h (r614)
+++ trunk/kernel/mm/vmm.h (r623)
  * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
  *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2017,2018)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites
```
trunk/kernel/mm/vseg.c
r595 → r623:

```diff
--- trunk/kernel/mm/vseg.c (r595)
+++ trunk/kernel/mm/vseg.c (r623)
  * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
  *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016,2018,2019)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

@@ vseg_type_str() : new kernel vseg types @@
      else if( vseg_type == VSEG_TYPE_FILE   ) return "FILE";
      else if( vseg_type == VSEG_TYPE_REMOTE ) return "REMO";
+     else if( vseg_type == VSEG_TYPE_KCODE  ) return "KCOD";
+     else if( vseg_type == VSEG_TYPE_KDATA  ) return "KDAT";
+     else if( vseg_type == VSEG_TYPE_KDEV   ) return "KDEV";
      else                                     return "undefined";

@@ vseg_init() : flags for the new kernel vseg types @@
          VSEG_CACHE ;
      }
+     else if( type == VSEG_TYPE_KCODE )
+     {
+         vseg->flags = VSEG_EXEC    |
+                       VSEG_CACHE   |
+                       VSEG_PRIVATE ;
+     }
+     else if( type == VSEG_TYPE_KDATA )
+     {
+         vseg->flags = VSEG_CACHE |
+                       VSEG_WRITE ;
+     }
+     else if( type == VSEG_TYPE_KDEV )
+     {
+         vseg->flags = VSEG_WRITE ;
+     }
      else
      {

@@ vseg_init_from_ref() : aligned remote accesses, kernel types in switch @@
      // initialize vseg with remote_read access
-     vseg->type        = hal_remote_l32 ( XPTR( cxy , &ptr->type        ) );
+     vseg->type        = hal_remote_l32( XPTR( cxy , &ptr->type        ) );
      vseg->min         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->min ) );
      vseg->max         = (intptr_t)hal_remote_lpt( XPTR( cxy , &ptr->max ) );
-     vseg->vpn_base    = hal_remote_l32 ( XPTR( cxy , &ptr->vpn_base    ) );
-     vseg->vpn_size    = hal_remote_l32 ( XPTR( cxy , &ptr->vpn_size    ) );
-     vseg->flags       = hal_remote_l32 ( XPTR( cxy , &ptr->flags       ) );
-     vseg->file_offset = hal_remote_l32 ( XPTR( cxy , &ptr->file_offset ) );
-     vseg->file_size   = hal_remote_l32 ( XPTR( cxy , &ptr->file_size   ) );
+     vseg->vpn_base    = hal_remote_l32( XPTR( cxy , &ptr->vpn_base    ) );
+     vseg->vpn_size    = hal_remote_l32( XPTR( cxy , &ptr->vpn_size    ) );
+     vseg->flags       = hal_remote_l32( XPTR( cxy , &ptr->flags       ) );
+     vseg->file_offset = hal_remote_l32( XPTR( cxy , &ptr->file_offset ) );
+     vseg->file_size   = hal_remote_l32( XPTR( cxy , &ptr->file_size   ) );
      vseg->mapper_xp   = (xptr_t) hal_remote_l64( XPTR( cxy , &ptr->mapper_xp ) );

      switch (vseg->type)
      {
-         case VSEG_TYPE_DATA:
+         case VSEG_TYPE_DATA:      // unused
          {
              vseg->cxy = 0xffff;
              break;
          }
-         case VSEG_TYPE_CODE:
+         case VSEG_TYPE_CODE:      // always local
          case VSEG_TYPE_STACK:
+         case VSEG_TYPE_KCODE:
          {
              vseg->cxy = local_cxy;
              break;
          }
-         case VSEG_TYPE_ANON:
+         case VSEG_TYPE_ANON:      // intrinsic
          case VSEG_TYPE_FILE:
          case VSEG_TYPE_REMOTE:
+         case VSEG_TYPE_KDEV:
+         case VSEG_TYPE_KDATA:
          {
              vseg->cxy = (cxy_t) hal_remote_l32( XPTR(cxy, &ptr->cxy) );
```
trunk/kernel/mm/vseg.h
r611 → r623:

```diff
--- trunk/kernel/mm/vseg.h (r611)
+++ trunk/kernel/mm/vseg.h (r623)
  * Authors   Ghassan Almaless (2008,2009,2010,2011,2012)
  *           Mohamed Lamine Karaoui (2015)
- *           Alain Greiner (2016)
+ *           Alain Greiner (2016,2017,2018,2019)
  *
  * Copyright (c) UPMC Sorbonne Universites

@@ vseg types : three new kernel types @@
 /*******************************************************************************************
- * This enum defines the vseg types for an user process.
+ * This enum defines the vseg types.
+ * Note : the KDATA and KDEV types are not used by the TSAR HAL, because the accesses
+ *        to kernel data or kernel devices are done through the DATA extension address
+ *        register, but these types are probably required by the I86 HAL [AG].
  ******************************************************************************************/

 typedef enum
 {
-    VSEG_TYPE_CODE   = 0,     /*! executable user code     / private / localized   */
-    VSEG_TYPE_DATA   = 1,     /*! initialized user data    / public  / distributed */
-    VSEG_TYPE_STACK  = 2,     /*! execution user stack     / private / localized   */
-    VSEG_TYPE_ANON   = 3,     /*! anonymous mmap           / public  / localized   */
-    VSEG_TYPE_FILE   = 4,     /*! file mmap                / public  / localized   */
-    VSEG_TYPE_REMOTE = 5,     /*! remote mmap              / public  / localized   */
+    VSEG_TYPE_CODE   = 0,     /*! executable user code     / private / localized   */
+    VSEG_TYPE_DATA   = 1,     /*! initialized user data    / public  / distributed */
+    VSEG_TYPE_STACK  = 2,     /*! execution user stack     / private / localized   */
+    VSEG_TYPE_ANON   = 3,     /*! anonymous mmap           / public  / localized   */
+    VSEG_TYPE_FILE   = 4,     /*! file mmap                / public  / localized   */
+    VSEG_TYPE_REMOTE = 5,     /*! remote mmap              / public  / localized   */
+
+    VSEG_TYPE_KCODE  = 6,     /*! executable kernel code   / private / localized   */
+    VSEG_TYPE_KDATA  = 7,     /*! initialized kernel data  / private / localized   */
+    VSEG_TYPE_KDEV   = 8,     /*! kernel peripheral device / public  / localized   */
 }
 vseg_type_t;

@@ vseg flags : VSEG_IDENT removed @@
 #define VSEG_PRIVATE 0x0010   /*! should not be accessed from another cluster */
 #define VSEG_DISTRIB 0x0020   /*! physically distributed on all clusters */
-#define VSEG_IDENT   0x0040   /*! identity mapping */
```
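Several hunks in vmm.c above repeat the same three-way test against the new kernel vseg types. A hypothetical convenience predicate (not part of the changeset) that would factor that pattern:

```c
// true if the vseg belongs to the kernel address space
static inline bool_t vseg_is_kernel( vseg_type_t type )
{
    return ( (type == VSEG_TYPE_KCODE) ||
             (type == VSEG_TYPE_KDATA) ||
             (type == VSEG_TYPE_KDEV ) );
}

// usage, mirroring vmm_delete_vseg() above:
// if( vseg_is_kernel( vseg->type ) == false ) { /* release page to KMEM */ }
```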