Changeset 635 for trunk/kernel
- Timestamp: Jun 26, 2019, 11:42:37 AM
- Location: trunk/kernel
- Files: 67 edited
trunk/kernel/fs/devfs.c
(r624 → r635) the DEVFS context allocation and release now use the generic KCM allocator:

      kmem_req_t    req;

-     req.type  = KMEM_DEVFS_CTX;
-     req.size  = sizeof(devfs_ctx_t);
+     req.type  = KMEM_KCM;
+     req.order = bits_log2( sizeof(devfs_ctx_t) );
      req.flags = AF_KERNEL | AF_ZERO;

-     return (devfs_ctx_t *)kmem_alloc( &req );
+     return kmem_alloc( &req );
  }
...
      kmem_req_t    req;

-     req.type = KMEM_DEVFS_CTX;
+     req.type = KMEM_KCM;
      req.ptr  = devfs_ctx;
      kmem_free( &req );
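The same request pattern recurs in most of the files below. The sketch that follows is not part of the changeset; it only illustrates, with the kmem_req_t fields and the kmem_alloc() / kmem_free() / bits_log2() calls visible in the hunks above, how a small kernel object is now allocated and released through the generic KMEM_KCM type (the mydev_t structure and the two helper functions are hypothetical):

typedef struct mydev_s { uint32_t state; } mydev_t;     // hypothetical small object

static mydev_t * mydev_alloc( void )
{
    kmem_req_t req;

    req.type  = KMEM_KCM;                        // per-cluster cache of small objects
    req.order = bits_log2( sizeof(mydev_t) );    // smallest order such that (1 << order) >= size
    req.flags = AF_KERNEL | AF_ZERO;             // kernel object, zero-filled

    return kmem_alloc( &req );                   // returns NULL if no memory
}

static void mydev_free( mydev_t * dev )
{
    kmem_req_t req;

    req.type = KMEM_KCM;                         // must match the allocation type
    req.ptr  = dev;                              // the allocator retrieves the slot size itself
    kmem_free( &req );
}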
trunk/kernel/fs/fatfs.c
(r633 → r635) the FATFS context and the 512-byte boot-sector buffer now use the KCM allocator:

      kmem_req_t    req;
-     req.type  = KMEM_FATFS_CTX;
-     req.size  = sizeof(fatfs_ctx_t);
+     req.type  = KMEM_KCM;
+     req.order = bits_log2( sizeof(fatfs_ctx_t) );
      req.flags = AF_KERNEL | AF_ZERO;

-     return (fatfs_ctx_t *)kmem_alloc( &req );
+     return kmem_alloc( &req );
  }
...
      // - temporarily the BOOT sector
      // - permanently the FS_INFO sector
-     req.type  = KMEM_512_BYTES;
+     req.type  = KMEM_KCM;
+     req.order = 9;                    // 512 bytes
      req.flags = AF_KERNEL | AF_ZERO;
-     buffer    = (uint8_t *)kmem_alloc( &req );
-     buffer_xp = XPTR( local_cxy , buffer );
+     buffer    = kmem_alloc( &req );

      if( buffer == NULL )
...
      }

+     buffer_xp = XPTR( local_cxy , buffer );
+
      // load the BOOT record from device
      error = dev_ioc_sync_read( buffer_xp , 0 , 1 );
...
      kmem_req_t    req;
-     req.type = KMEM_FATFS_CTX;
+     req.type = KMEM_KCM;
      req.ptr  = fatfs_ctx;
      kmem_free( &req );
trunk/kernel/fs/vfs.c
(r634 → r635) inode / dentry / file descriptor allocation reworked, debug blocks cleaned up:

      mapper_t    * mapper;     // associated mapper (to be allocated)
      vfs_inode_t * inode;      // inode descriptor (to be allocated)
+
      uint32_t      inum;       // inode identifier (to be allocated)
      vfs_ctx_t   * ctx;        // file system context
...
      error_t       error;

- #if DEBUG_VFS_INODE_CREATE
- char           name[CONFIG_VFS_MAX_NAME_LENGTH];
- uint32_t       cycle      = (uint32_t)hal_get_cycles();
- cxy_t          dentry_cxy = GET_CXY( dentry_xp );
- vfs_dentry_t * dentry_ptr = GET_PTR( dentry_xp );
- thread_t     * this       = CURRENT_THREAD;
- if( dentry_xp != XPTR_NULL ) hal_remote_strcpy( XPTR( local_cxy , name ),
-                                                 XPTR( dentry_cxy , dentry_ptr->name ) );
- else                         strcpy( name , "/" );
- if( DEBUG_VFS_INODE_CREATE < cycle )
- printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, name, cycle );
- #endif
-
      // check fs type and get pointer on context
      if     ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
...
-     // allocate memory for VFS inode descriptor
-     req.type  = KMEM_VFS_INODE;
-     req.size  = sizeof(vfs_inode_t);
+     // check inode descriptor contained in one page
+     assert( (sizeof(vfs_inode_t) <= CONFIG_PPM_PAGE_SIZE),
+             "inode descriptor must fit in one page" );
+
+     // allocate one page for VFS inode descriptor
+     // because the embedded "children" xhtab footprint
+     req.type  = KMEM_PPM;
+     req.order = 0;
      req.flags = AF_KERNEL | AF_ZERO;
-     inode     = (vfs_inode_t *)kmem_alloc( &req );
+     inode     = kmem_alloc( &req );

      if( inode == NULL )
...
  #if DEBUG_VFS_INODE_CREATE
- cycle = (uint32_t)hal_get_cycles();
+ char       name[CONFIG_VFS_MAX_NAME_LENGTH];
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
+ thread_t * this  = CURRENT_THREAD;
+ vfs_inode_get_name( *inode_xp , name );
  if( DEBUG_VFS_INODE_CREATE < cycle )
- printk("\n[%s] thread[%x,%x] exit for <%s> / inode [%x,%x] / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] created <%s> / inode [%x,%x] / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, inode, cycle );
  #endif
...
      // release memory allocate for inode descriptor
      kmem_req_t req;
+     req.type = KMEM_PPM;
      req.ptr  = inode;
-     req.type = KMEM_VFS_INODE;
      kmem_free( &req );
...
      kmem_req_t     req;        // request to kernel memory allocator

- #if DEBUG_VFS_DENTRY_CREATE
- thread_t * this  = CURRENT_THREAD;
- uint32_t   cycle = (uint32_t)hal_get_cycles();
- if( DEBUG_VFS_DENTRY_CREATE < cycle )
- printk("\n[%s] thread[%x,%x] enter for <%s> / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, name, cycle );
- #endif
-
      // get pointer on context
      if     ( fs_type == FS_TYPE_FATFS ) ctx = &fs_context[FS_TYPE_FATFS];
...
      // allocate memory for dentry descriptor
-     req.type  = KMEM_VFS_DENTRY;
-     req.size  = sizeof(vfs_dentry_t);
+     req.type  = KMEM_KCM;
+     req.order = bits_log2( sizeof(vfs_dentry_t) );
      req.flags = AF_KERNEL | AF_ZERO;
-     dentry    = (vfs_dentry_t *)kmem_alloc( &req );
+     dentry    = kmem_alloc( &req );

      if( dentry == NULL )
...
  #if DEBUG_VFS_DENTRY_CREATE
- cycle = (uint32_t)hal_get_cycles();
+ thread_t * this  = CURRENT_THREAD;
+ uint32_t   cycle = (uint32_t)hal_get_cycles();
  if( DEBUG_VFS_DENTRY_CREATE < cycle )
- printk("\n[%s] thread[%x,%x] exit for <%s> / dentry [%x,%x] / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] created <%s> / dentry [%x,%x] / cycle %d\n",
  __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, dentry, cycle );
  #endif
...
      // release memory allocated to dentry
      kmem_req_t req;
+     req.type = KMEM_KCM;
      req.ptr  = dentry;
-     req.type = KMEM_VFS_DENTRY;
      kmem_free( &req );
...
      // allocate memory for new file descriptor
-     req.type  = KMEM_VFS_FILE;
-     req.size  = sizeof(vfs_file_t);
+     req.type  = KMEM_KCM;
+     req.order = bits_log2( sizeof(vfs_file_t) );
      req.flags = AF_KERNEL | AF_ZERO;
-     file      = (vfs_file_t *)kmem_alloc( &req );
+     file      = kmem_alloc( &req );

      if( file == NULL ) return ENOMEM;
...
  {
      kmem_req_t req;
+     req.type = KMEM_KCM;
      req.ptr  = file;
-     req.type = KMEM_VFS_FILE;
      kmem_free( &req );
...
  #endif

+
      // 3. register new_dentry in new_inode xlist of parents
      parents_root_xp = XPTR( child_cxy , &new_inode_ptr->parents );
trunk/kernel/fs/vfs.h
(r633 → r635) the description of the inode creation function is completed:

  /******************************************************************************************
   * This function allocates memory from local cluster for an inode descriptor and the
-  * associated mapper. It initialise these descriptors from arguments values.
+  * associated mapper, and partially initialise this inode from arguments values.
+  * It does NOT link it to the Inode Tree, as this is done by add_child_in_parent().
   * It must called by a local thread. Use the RPC_INODE_CREATE if client thread is remote.
   ******************************************************************************************
trunk/kernel/kern/chdev.c
(r625 → r635) the chdev descriptor is now allocated from the KCM allocator:

      // allocate memory for chdev
-     req.type  = KMEM_DEVICE;
-     req.flags = AF_ZERO;
-     chdev     = (chdev_t *)kmem_alloc( &req );
+     req.type  = KMEM_KCM;
+     req.order = bits_log2( sizeof(chdev_t) );
+     req.flags = AF_ZERO | AF_KERNEL;
+     chdev     = kmem_alloc( &req );

      if( chdev == NULL ) return NULL;
trunk/kernel/kern/cluster.c
(r627 → r635) the single embedded KCM is replaced by six KCM allocators (orders 6 to 11):

   * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
   *         Mohamed Lamine Karaoui (2015)
-  *         Alain Greiner (2016,2017,2018)
+  *         Alain Greiner (2016,2017,2018,2019)
   *
   * Copyright (c) UPMC Sorbonne Universites
...
      cluster_t * cluster = LOCAL_CLUSTER;

-     // initialize the lock protecting the embedded kcm allocator
-     busylock_init( &cluster->kcm_lock , LOCK_CLUSTER_KCM );
-
  #if DEBUG_CLUSTER_INIT
      uint32_t   cycle = (uint32_t)hal_get_cycles();
...
      // initialises embedded KCM
-     kcm_init( &cluster->kcm , KMEM_KCM );
+     uint32_t i;
+     for( i = 0 ; i < 6 ; i++ ) kcm_init( &cluster->kcm[i] , i+6 );

  #if( DEBUG_CLUSTER_INIT & 1 )
      cycle = (uint32_t)hal_get_cycles();
      if( DEBUG_CLUSTER_INIT < cycle )
-     printk("\n[%s] KCM initialized in cluster %x at cycle %d\n",
+     printk("\n[%s] KCM[6:11] initialized in cluster %x at cycle %d\n",
      __FUNCTION__ , local_cxy , hal_get_cycles() );
  #endif
trunk/kernel/kern/cluster.h
(r611 → r635) the cluster descriptor now embeds one KCM per object order; kcm_tbl[] and its lock disappear:

      ppm_t           ppm;               /*! embedded kernel page manager                */
      khm_t           khm;               /*! embedded kernel heap manager                */
-     kcm_t           kcm;               /*! embedded kernel KCMs manager                */
-
-     kcm_t         * kcm_tbl[KMEM_TYPES_NR];   /*! pointers on allocated KCMs           */
-     busylock_t      kcm_lock;          /*! protect kcm_tbl[] updates                   */
+     kcm_t           kcm[6];            /*! embedded kernel cache managers [6:11]       */

      // RPC
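The mapping between a request order and one of the six embedded allocators is implicit in the kcm_init() loop shown in cluster.c above: kcm[i] is initialised with order (i + 6). The helper below is not part of the changeset; it is only a sketch of how a KMEM_KCM request could be routed, assuming small requests are clamped to the 64-byte minimum slot:

kcm_t * cluster_get_kcm( cluster_t * cluster,     // hypothetical helper, for illustration only
                         uint32_t    order )      // requested order (bits_log2 of the size)
{
    // the smallest embedded KCM serves 64-byte (order 6) slots
    if( order < 6 ) order = 6;

    // objects larger than 2 Kbytes (order 11) must use the PPM allocator instead
    assert( (order <= 11) , "KCM cannot serve objects larger than 2K bytes" );

    // kcm[i] was initialised with order (i + 6) by cluster_init()
    return &cluster->kcm[ order - 6 ];
}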
trunk/kernel/kern/kernel_init.c
(r633 → r635) CONFIG_ALMOS_VERSION is renamed CONFIG_VERSION, and the memory footprint display is now controlled by CONFIG_INSTRUMENTATION_FOOTPRINT and extended with new kernel objects:

      "\n\n\t\t Advanced Locality Management Operating System / Multi Kernel Hybrid\n"
      "\n\n\t\t %s / %d cluster(s) / %d core(s) per cluster\n\n",
-     CONFIG_ALMOS_VERSION , nclusters , ncores );
+     CONFIG_VERSION , nclusters , ncores );
  }
...
- #if ( DEBUG_KERNEL_INIT & 1 )
+ #if CONFIG_INSTRUMENTATION_FOOTPRINT
  if( (core_lid ==  0) & (local_cxy == 0) )
  printk("\n\n***** memory fooprint for main kernel objects\n\n"
...
         " - rpc fifo           : %d bytes\n"
         " - page descriptor    : %d bytes\n"
-        " - mapper root        : %d bytes\n"
+        " - mapper descriptor  : %d bytes\n"
+        " - vseg descriptor    : %d bytes\n"
         " - ppm manager        : %d bytes\n"
         " - kcm manager        : %d bytes\n"
...
         " - vmm manager        : %d bytes\n"
         " - gpt root           : %d bytes\n"
+        " - vfs inode          : %d bytes\n"
+        " - vfs dentry         : %d bytes\n"
+        " - vfs file           : %d bytes\n"
+        " - vfs context        : %d bytes\n"
+        " - xhtab root         : %d bytes\n"
         " - list item          : %d bytes\n"
         " - xlist item         : %d bytes\n"
...
         sizeof( page_t ),
         sizeof( mapper_t ),
+        sizeof( vseg_t ),
         sizeof( ppm_t ),
         sizeof( kcm_t ),
...
         sizeof( vmm_t ),
         sizeof( gpt_t ),
+        sizeof( vfs_inode_t ),
+        sizeof( vfs_dentry_t ),
+        sizeof( vfs_file_t ),
+        sizeof( vfs_ctx_t ),
+        sizeof( xhtab_t ),
         sizeof( list_entry_t ),
         sizeof( xlist_entry_t ),
...
- #if ( DEBUG_KERNEL_INIT & 1 )
+ #if DEBUG_KERNEL_INIT
  thread_t * this = CURRENT_THREAD;
  printk("\n[%s] : thread[%x,%x] on core[%x,%d] jumps to thread_idle_func() / cycle %d\n",
trunk/kernel/kern/process.c
(r633 → r635) the process descriptor is allocated from the KCM allocator, debug blocks are reworked in process_make_fork(), process_make_exec() and process_zero_create(), and several comments are fixed:

  process_t * process_alloc( void )
  {
      kmem_req_t req;

-     req.type  = KMEM_PROCESS;
-     req.size  = sizeof(process_t);
+     req.type  = KMEM_KCM;
+     req.order = bits_log2( sizeof(process_t) );
      req.flags = AF_KERNEL;

-     return (process_t *)kmem_alloc( &req );
+     return kmem_alloc( &req );
  }
...
      kmem_req_t req;

-     req.type = KMEM_PROCESS;
+     req.type = KMEM_KCM;
      req.ptr  = process;
      kmem_free( &req );
...
-     // initialize VSL locks
+     // initialize VSL lock
      remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );

-     // register kernel vsegs in VMM as required by the architecture
+     // register kernel vsegs in user process VMM as required by the architecture
      error = hal_vmm_kernel_update( process );
...
  #if (DEBUG_PROCESS_REFERENCE_INIT & 1)
  if( DEBUG_PROCESS_REFERENCE_INIT < cycle )
- printk("\n[%s] thread[%x,%x] registered kernel vsegs for process %x\n",
+ printk("\n[%s] thread[%x,%x] registered kernel vsegs in VSL for process %x\n",
  __FUNCTION__, parent_pid, this->trdid, pid );
  #endif
...
  printk("\n[%s] thread[%x,%x] exit for process %x / cycle %d\n",
  __FUNCTION__, parent_pid, this->trdid, pid, cycle );
+ #endif
+
+ #if (DEBUG_PROCESS_REFERENCE_INIT & 1)
+ hal_vmm_display( parent_xp , false );
+ hal_vmm_display( XPTR( local_cxy , process ) , false );
  #endif
...
  In process_make_fork():

  #if DEBUG_PROCESS_MAKE_FORK
- uint32_t cycle   = (uint32_t)hal_get_cycles();
+ uint32_t cycle;
  thread_t * this  = CURRENT_THREAD;
  trdid_t    trdid = this->trdid;
  pid_t      pid   = this->process->pid;
+ #endif
+
+ #if( DEBUG_PROCESS_MAKE_FORK & 1 )
+ cycle   = (uint32_t)hal_get_cycles();
  if( DEBUG_PROCESS_MAKE_FORK < cycle )
  printk("\n[%s] thread[%x,%x] enter / cluster %x / cycle %d\n",
...
  printk("\n[%s] thread[%x,%x] copied VMM from parent to child / cycle %d\n",
  __FUNCTION__, pid, trdid, cycle );
+ hal_vmm_display( XPTR( local_cxy , process ) , true );
  #endif
...
  if( DEBUG_PROCESS_MAKE_FORK < cycle )
- printk("\n[%s] thread[%x,%x] / child takes TXT ownership / cycle %d\n",
- __FUNCTION__ , pid, trdid, cycle );
+ printk("\n[%s] thread[%x,%x] / child_process %x takes TXT ownership / cycle %d\n",
+ __FUNCTION__ , pid, trdid, new_pid, cycle );
  #endif
...
-     // set COW flag in DATA, ANON, REMOTE vsegs for parent process VMM
+     // set COW flag in DATA, ANON, REMOTE vsegs in parent process VMM
      // this includes all parent process copies in all clusters
...
  if( DEBUG_PROCESS_MAKE_FORK < cycle )
- printk("\n[%s] thread[%x,%x] set COW in parent and child / cycle %d\n",
+ printk("\n[%s] thread[%x,%x] set COW in DATA / ANON / REMOTE for parent and child / cycle %d\n",
  __FUNCTION__, pid, trdid, cycle );
  #endif
...
  In process_make_exec(), a hard-wired debug test is removed from the six debug blocks:

  #if DEBUG_PROCESS_MAKE_EXEC
  uint32_t cycle = (uint32_t)hal_get_cycles();
- if( local_cxy == 0x11 )
+ if( DEBUG_PROCESS_MAKE_EXEC < cycle )
  printk("\n[%s] thread[%x,%x] enters for %s / cycle %d\n",
  __FUNCTION__, pid, thread->trdid, path, cycle );
  (the same replacement is applied to the five other DEBUG_PROCESS_MAKE_EXEC blocks)
...
  In process_zero_create(), a debug message is added after each initialisation step:

+ #if (DEBUG_PROCESS_ZERO_CREATE & 1)
+ if( DEBUG_PROCESS_ZERO_CREATE < cycle )
+ printk("\n[%s] allocated pid %x in cluster %x\n", __FUNCTION__, pid, local_cxy );
+ #endif
  (similar blocks report "initialized VSL empty", "initialized GPT empty",
  "initialized hal specific VMM", "initialized th_tbl[]" and "initialized children list")
...
-     // initilise VSL as empty
+     // initialize VSL as empty
      vmm->vsegs_nr = 0;
      xlist_root_init( XPTR( local_cxy , &vmm->vsegs_root ) );
...
-     // initialise GPT as empty
+     // initialize GPT as empty
      error = hal_gpt_create( &vmm->gpt );
...
  In process_init_create():

  #if (DEBUG_PROCESS_INIT_CREATE & 1)
- hal_vmm_display( process , true );
+ hal_vmm_display( XPTR( local_cxy , process ) , true );
  #endif
trunk/kernel/kern/process.h
(r625 → r635) author years updated, and a caller list in a function description is reformatted:

   * Authors  Ghassan Almaless (2008,2009,2010,2011,2012)
   *          Mohamed Lamine Karaoui (2015)
-  *          Alain Greiner (2016,2017,2018)
+  *          Alain Greiner (2016,2017,2018,2019)
...
   * descriptor, defined by the <parent_xp> argument. The <process> and <pid> arguments
   * are previously allocated by the caller. This function can be called by two functions:
-  * 1) process_init_create() : process is the INIT process, and parent is process-zero.
-  * 2) process_make_fork()   : the parent process descriptor is generally remote.
+  * - process_init_create() : process is the INIT process, and parent is process-zero.
+  * - process_make_fork()   : the parent process descriptor is generally remote.
   * The following fields are initialised :
   * - It set the pid / ppid / ref_xp / parent_xp / state fields.
trunk/kernel/kern/rpc.c
(r632 → r635) the KCM_ALLOC, KCM_FREE, MAPPER_HANDLE_MISS and VMM_DISPLAY RPCs are deprecated:

      &rpc_vmm_get_vseg_server,              // 20
      &rpc_vmm_global_update_pte_server,     // 21
-     &rpc_kcm_alloc_server,                 // 22
-     &rpc_kcm_free_server,                  // 23
+     &rpc_undefined,                        // 22
+     &rpc_undefined,                        // 23
      &rpc_mapper_sync_server,               // 24
-     &rpc_mapper_handle_miss_server,        // 25
+     &rpc_undefined,                        // 25
      &rpc_vmm_delete_vseg_server,           // 26
      &rpc_vmm_create_vseg_server,           // 27
      &rpc_vmm_set_cow_server,               // 28
-     &rpc_hal_vmm_display_server,           // 29
+     &rpc_undefined,                        // 29
  };
...
      "GET_VSEG",                  // 20
      "GLOBAL_UPDATE_PTE",         // 21
-     "KCM_ALLOC",                 // 22
-     "KCM_FREE",                  // 23
+     "undefined_22",              // 22
+     "undefined_23",              // 23
      "MAPPER_SYNC",               // 24
-     "MAPPER_HANDLE_MISS",        // 25
+     "undefined_25",              // 25
      "VMM_DELETE_VSEG",           // 26
      "VMM_CREATE_VSEG",           // 27
      "VMM_SET_COW",               // 28
-     "VMM_DISPLAY",               // 29
+     "undefined_29",              // 29
  };
...
      // release memory to local pmem
      kmem_req_t req;
-     req.type = KMEM_PAGE;
+     req.type = KMEM_PPM;
      req.ptr  = page;
      kmem_free( &req );
...
  The marshaling functions rpc_kcm_alloc_client/server [22], rpc_kcm_free_client/server [23],
  rpc_mapper_handle_miss_client/server [25] and rpc_hal_vmm_display_client/server [29] are
  commented out, and the section header "[25] Marshaling functions attached to RPC_MAPPER_SYNC"
  is corrected to "[24]".
trunk/kernel/kern/rpc.h
(r632 → r635) the deprecated RPC indexes are renamed in the rpc_index_t enum, and the corresponding client/server prototypes are commented out:

      RPC_VMM_GET_VSEG           = 20,
      RPC_VMM_GLOBAL_UPDATE_PTE  = 21,
-     RPC_KCM_ALLOC              = 22,
-     RPC_KCM_FREE               = 23,
+     RPC_UNDEFINED_22           = 22,    // RPC_KCM_ALLOC           deprecated [AG]
+     RPC_UNDEFINED_23           = 23,    // RPC_KCM_FREE            deprecated [AG]
      RPC_MAPPER_SYNC            = 24,
-     RPC_MAPPER_HANDLE_MISS     = 25,
+     RPC_UNDEFUNED_25           = 25,    // RPC_MAPPER_HANDLE_MISS  deprecated [AG]
      RPC_VMM_DELETE_VSEG        = 26,
      RPC_VMM_CREATE_VSEG        = 27,
      RPC_VMM_SET_COW            = 28,
-     RPC_VMM_DISPLAY            = 29,
+     RPC_UNDEFINED_29           = 29,    // RPC_VMM_DISPLAY         deprecated [AG]

      RPC_MAX_INDEX              = 30,

  (the prototypes of rpc_kcm_alloc_client/server, rpc_kcm_free_client/server,
  rpc_mapper_handle_miss_client/server and rpc_hal_vmm_display_client/server are
  commented out)
trunk/kernel/kern/scheduler.c
(r630 → r635) self-deletion is now forbidden (checked by an assert), and the DEBUG_SCHED_YIELD traces can also be activated by a cycle threshold:

      sched = &core->scheduler;

-     ////////////////// scan user threads to handle both ACK and DELETE requests
+     ////////////////// scan user threads to handle ACK and DELETE requests
      root = &sched->u_root;
      iter = root->next;
...
-             // check thread blocked
+             // check target thread blocked
              assert( (thread->blocked & THREAD_BLOCKED_GLOBAL) , "thread not blocked" );
...
          // handle REQ_DELETE only if target thread != calling thread
-         if( (thread->flags & THREAD_FLAG_REQ_DELETE) && (thread != CURRENT_THREAD) )
-         {
+         if( thread->flags & THREAD_FLAG_REQ_DELETE )
+         {
+
+             // check calling thread != target thread
+             assert( (thread != CURRENT_THREAD) , "calling thread cannot delete itself" );
+
              // get thread process descriptor
              process = thread->process;
...
      remote_fifo_t * fifo = &LOCAL_CLUSTER->rpc_fifo[lid];

+ #if DEBUG_SCHED_YIELD
+ uint32_t cycle = (uint32_t)hal_get_cycles();
+ #endif
+
  #if (DEBUG_SCHED_YIELD & 0x1)
- if( sched->trace )
+ if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
  sched_display( lid );
  #endif
...
  #if DEBUG_SCHED_YIELD
- if( sched->trace )
+ if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
  printk("\n[%s] core[%x,%d] / cause = %s\n"
  "      thread %x (%s) (%x,%x) => thread %x (%s) (%x,%x) / cycle %d\n",
  __FUNCTION__, local_cxy, lid, cause,
  current, thread_type_str(current->type), current->process->pid, current->trdid, next ,
- thread_type_str(next->type) , next->process->pid , next->trdid , (uint32_t)hal_get_cycles() );
+ thread_type_str(next->type) , next->process->pid , next->trdid , cycle );
  #endif
...
      busylock_release( &sched->lock );

- #if (DEBUG_SCHED_YIELD & 1)
- if( sched->trace )
+ #if DEBUG_SCHED_YIELD
+ if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
  printk("\n[%s] core[%x,%d] / cause = %s\n"
  "      thread %x (%s) (%x,%x) continue / cycle %d\n",
trunk/kernel/kern/thread.c
(r633 → r635) thread descriptors are now allocated and released through the KMEM_PPM type, the page-fault counters are cumulated in the process VMM when a thread is destroyed, and several debug messages are reworded:

  static thread_t * thread_alloc( void )
  {
-     page_t       * page;   // pointer on page descriptor containing thread descriptor
      kmem_req_t     req;    // kmem request

      // allocates memory for thread descriptor + kernel stack
-     req.type  = KMEM_PAGE;
-     req.size  = CONFIG_THREAD_DESC_ORDER;
+     req.type  = KMEM_PPM;
+     req.order = CONFIG_THREAD_DESC_ORDER;
      req.flags = AF_KERNEL | AF_ZERO;
-     page      = kmem_alloc( &req );
-
-     if( page == NULL ) return NULL;
-
-     // return pointer on new thread descriptor
-     xptr_t base_xp = ppm_page2base( XPTR(local_cxy , page ) );
-     return GET_PTR( base_xp );
+
+     return kmem_alloc( &req );

  }  // end thread_alloc()
...
-     // check type and trdid fields initialized
+     // check type and trdid fields are initialized
      assert( (thread->type == type)   , "bad type argument" );
      assert( (thread->trdid == trdid) , "bad trdid argument" );
...
  if( DEBUG_THREAD_INIT < cycle )
- printk("\n[%s] thread[%x,%x] enter for thread %x in process %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, thread->trdid, process->pid, cycle );
+ printk("\n[%s] thread[%x,%x] enter for thread[%x,%x] / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
  #endif
...
  if( DEBUG_THREAD_INIT < cycle )
- printk("\n[%s] thread[%x,%x] exit for thread %x in process %x / cycle %d\n",
- __FUNCTION__, this->process->pid, this->trdid, thread, process->pid, cycle );
+ printk("\n[%s] thread[%x,%x] exit for thread[%x,%x] / cycle %d\n",
+ __FUNCTION__, this->process->pid, this->trdid, process->pid, thread->trdid, cycle );
  #endif
...
      vpn_t  parent_vpn_size = hal_remote_l32( XPTR( parent_cxy, &parent_us_vseg->vpn_size ) );
      vpn_t  child_vpn_base  = child_us_vseg->vpn_base;
+
      for( parent_vpn = parent_vpn_base , child_vpn = child_vpn_base ;
           parent_vpn < (parent_vpn_base + parent_vpn_size) ;
...
  if( DEBUG_THREAD_USER_FORK < cycle )
- printk("\n[%s] thread[%x,%x] copied all stack vseg PTEs to child GPT\n",
+ printk("\n[%s] thread[%x,%x] copied STACK vseg PTEs & set COW in child GPT\n",
  __FUNCTION__, this->process->pid, this->trdid );
  #endif
...
  if( DEBUG_THREAD_USER_FORK < cycle )
- printk("\n[%s] thread[%x,%x] set the COW flag for stack vseg in parent GPT\n",
+ printk("\n[%s] thread[%x,%x] set COW for STACK vseg in parent GPT\n",
  __FUNCTION__, this->process->pid, this->trdid );
  #endif
...
      thread_assert_can_yield( thread , __FUNCTION__ );

-     // update target process instrumentation counter
-     // process->vmm.pgfault_nr += thread->info.pgfault_nr;
+ #if CONFIG_INSTRUMENTATION_PGFAULTS
+     process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
+     process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
+     process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
+     process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
+     process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
+     process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
+ #endif

      // remove thread from process th_tbl[]
      count = process_remove_thread( thread );

-     // release memory allocated for CPU context and FPU context if required
+     // release memory allocated for CPU context and FPU context
      hal_cpu_context_destroy( thread );
      hal_fpu_context_destroy( thread );
...
      // release memory for thread descriptor (including kernel stack)
      kmem_req_t   req;
-     xptr_t       base_xp = ppm_base2page( XPTR(local_cxy , thread ) );
-
-     req.type  = KMEM_PAGE;
-     req.ptr   = GET_PTR( base_xp );
+     req.type  = KMEM_PPM;
+     req.ptr   = thread;
      kmem_free( &req );
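With the KMEM_PPM type, kmem_alloc() returns the base address of the allocated page(s) and kmem_free() takes that same base address, so the ppm_page2base() / ppm_base2page() conversions disappear from the callers. A minimal sketch of this pattern, using only the kmem_req_t fields visible in the hunks above (the two helper names are hypothetical):

static void * pages_alloc_zeroed( uint32_t order )     // 1 << order contiguous pages
{
    kmem_req_t req;

    req.type  = KMEM_PPM;               // physical pages manager
    req.order = order;
    req.flags = AF_KERNEL | AF_ZERO;

    return kmem_alloc( &req );          // base address of the pages, or NULL
}

static void pages_free( void * base )
{
    kmem_req_t req;

    req.type = KMEM_PPM;
    req.ptr  = base;                    // same base address returned by kmem_alloc()
    kmem_free( &req );
}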
trunk/kernel/kern/thread.h
(r629 → r635) the per-thread page-fault counters are regrouped, and two function descriptions are clarified:

  {
      uint32_t     false_pgfault_nr;       /*! number of local page fault               */
+     uint32_t     local_pgfault_nr;       /*! number of local page fault               */
+     uint32_t     global_pgfault_nr;      /*! number of global page fault              */
      uint32_t     false_pgfault_cost;     /*! cumulated cost                           */
-     uint32_t     local_pgfault_nr;       /*! number of local page fault               */
      uint32_t     local_pgfault_cost;     /*! cumulated cost                           */
-     uint32_t     global_pgfault_nr;      /*! number of global page fault              */
      uint32_t     global_pgfault_cost;    /*! cumulated cost                           */
...
   * this. This includes the thread descriptor itself, the associated CPU and FPU context,
   * and the physical memory allocated for an user thread stack.
+  * This function does not remove the thread from the scheduler, as this is done by
+  * the scheduler itself.
   ***************************************************************************************
   * @ thread  : pointer on the thread descriptor to release.
...
   * The calling thread can run in any cluster, as it uses remote accesses.
   * This function makes a kernel panic if the target thread is the main thread,
-  * because *the main thread deletion will cause the process deletion, and a process
+  * because the main thread deletion will cause the process deletion, and a process
   * must be deleted by the parent process, running the wait function.
   * If the target thread is running in "attached" mode, and the <is_forced> argument
trunk/kernel/kernel_config.h
(r634 → r635) the version string becomes CONFIG_VERSION in a new GENERAL CONFIGURATION section, new debug and instrumentation switches are added, and the mapper radix-tree widths are rebalanced:

- #define CONFIG_ALMOS_VERSION     "Version 2.1 / May 2019"
...
  #define DEBUG_HAL_GPT_DESTROY             0
  #define DEBUG_HAL_GPT_LOCK_PTE            0
+ #define DEBUG_HAL_GPT_SET_COW             0
  #define DEBUG_HAL_GPT_SET_PTE             0
...
  #define DEBUG_KCM                         0
+ #define DEBUG_KCM_REMOTE                  0
+
  #define DEBUG_KMEM                        0
+ #define DEBUG_KMEM_REMOTE                 0
...
- #define DEBUG_RPC_KCM_ALLOC               0
- #define DEBUG_RPC_KCM_FREE                0
- #define DEBUG_RPC_MAPPER_HANDLE_MISS      0
  #define DEBUG_RPC_MAPPER_MOVE_USER        0
  #define DEBUG_RPC_PROCESS_MAKE_FORK       0
...
- #define DEBUG_VMM_HANDLE_PAGE_FAULT       19000000
+ #define DEBUG_VMM_HANDLE_PAGE_FAULT       0
  #define DEBUG_VMM_HANDLE_COW              0
...
  #define LOCK_FATFS_FAT        36   // remote (RW)  protect exclusive access to the FATFS FAT

+ ////////////////////////////////////////////////////////////////////////////////////////////
+ //                              GENERAL CONFIGURATION
+ ////////////////////////////////////////////////////////////////////////////////////////////
+
+ #define CONFIG_VERSION           "Version 2.2 / June 2019"
...
- #define CONFIG_MAPPER_GRDXT_W1              7     // number of bits for RADIX_TREE_IX1
+ #define CONFIG_MAPPER_GRDXT_W1              6     // number of bits for RADIX_TREE_IX1
  #define CONFIG_MAPPER_GRDXT_W2              7     // number of bits for RADIX_TREE_IX2
- #define CONFIG_MAPPER_GRDXT_W3              6     // number of bits for RADIX_TREE_IX3
+ #define CONFIG_MAPPER_GRDXT_W3              7     // number of bits for RADIX_TREE_IX3
...
  #define CONFIG_PPM_MAX_RSVD                 32    // max reserved zones on the machine

- #define CONFIG_KCM_SLOT_SIZE                64    // smallest allocated block (bytes)
-
  #define CONFIG_PPM_PAGE_ALIGNED     __attribute__((aligned(CONFIG_PPM_PAGE_SIZE)))
...
  #define CONFIG_INSTRUMENTATION_SYSCALLS     0
  #define CONFIG_INSTRUMENTATION_PGFAULTS     1
+ #define CONFIG_INSTRUMENTATION_FOOTPRINT    1
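The three CONFIG_MAPPER_GRDXT_W* widths still sum to 20 bits, so the key range of the mapper radix tree is unchanged; only the split between the levels moves. A worked example of the index decomposition performed by grdxt.c for the new 6/7/7 split, assuming (as the grdxt.h comment suggests) that the key is the page index in the file:

// ix1 =  key >> (W2 + W3)                 bits [19:14] :  64 entries in the level 1 array
// ix2 = (key >> W3) & ((1 << W2) - 1)     bits [13:7]  : 128 entries in each level 2 array
// ix3 =  key & ((1 << W3) - 1)            bits [6:0]   : 128 entries in each level 3 array
//
// e.g. for key = 0x12345 : ix1 = 0x04 , ix2 = 0x46 , ix3 = 0x45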
trunk/kernel/libk/bits.c
(r473 → r635) author years updated in the file header:

   * Author   Ghassan Almaless (2008,2009,2010,2011,2012)
-  *          Alain Greiner    (2016)
+  *          Alain Greiner    (2016,2017,2018,2019)
trunk/kernel/libk/bits.h
(r457 → r635) author years updated, the bits_nr() helper is removed, and bits_log2() is redefined to return the smallest order such that (1 << order) >= val:

  /*********************************************************************************************
-  * This function returns the number of bits to code a non-zero unsigned integer value.
-  *********************************************************************************************
-  * @ val   : value to analyse
-  * @ returns number of bits
-  ********************************************************************************************/
- static inline uint32_t bits_nr( uint32_t val )
- {
-     register uint32_t i;
-
-     for( i=0 ; val > 0 ; i++ )
-         val = val >> 1;
-
-     return i;
- }
-
- /*********************************************************************************************
-  * This function takes an unsigned integer value as input argument, and returns another
-  * unsigned integer, that is the (base 2) logarithm of the smallest power of 2 contained
-  * in the input value.
+  * This function takes a positive integer <val> as input argument, and returns the smallest
+  * integer <order> such as : 1<<order >= val.
+  * In other words, <order> is the min number of bits to encode <val> values.
   *********************************************************************************************
   * @ val   : value to analyse
...
  static inline uint32_t bits_log2( uint32_t val )
  {
-     return (val == 0) ? 1 : bits_nr( val ) - 1;
+     uint32_t i;
+
+     if( val > 0 )
+     {
+         val--;
+         for( i=0 ; val > 0 ; i++ ) val = val >> 1;
+         return i;
+     }
+     return 0;
  }
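A few values illustrating the new bits_log2() semantics, which explain the req.order fields used throughout this changeset:

//  bits_log2( 0 )   == 0
//  bits_log2( 1 )   == 0
//  bits_log2( 2 )   == 1
//  bits_log2( 64 )  == 6     // a 64-byte object maps to KCM order 6
//  bits_log2( 65 )  == 7     // 65 bytes do not fit in (1 << 6), so order 7
//  bits_log2( 512 ) == 9     // matches the explicit "req.order = 9" used for the FATFS boot sector buffer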
trunk/kernel/libk/elf.c
(r625 → r635) author years updated, a debug printk fixed (this->process_pid corrected to this->process->pid, and the missing cycle argument added), and the segments array allocated from the KCM:

  printk("\n[%s] thread[%x,%x] found %s vseg / base %x / size %x\n"
  "  file_size %x / file_offset %x / mapper_xp %l / cycle %d\n",
- __FUNCTION__ , this->process_pid, this->trdid,
+ __FUNCTION__ , this->process->pid, this->trdid,
  vseg_type_str(vseg->type) , vseg->min , vseg->max - vseg->min ,
- vseg->file_size , vseg->file_offset , vseg->mapper_xp );
+ vseg->file_size , vseg->file_offset , vseg->mapper_xp, cycle );
  #endif
...
      // allocate memory for segment descriptors array
-     req.type  = KMEM_GENERIC;
-     req.size  = segs_size;
+     req.type  = KMEM_KCM;
+     req.order = bits_log2(segs_size);
      req.flags = AF_KERNEL;
      segs_base = kmem_alloc( &req );
trunk/kernel/libk/grdxt.c
(r626 → r635) the file is reorganised in two sections ("Local access functions" and "Remote access functions"), the level arrays are allocated and released with the KCM allocator, and remote insert / remove / display functions are added:

  #include <grdxt.h>

+ ////////////////////////////////////////////////////////////////////////////////////////
+ //                       Local access functions
+ ////////////////////////////////////////////////////////////////////////////////////////
+
  /////////////////////////////////
  error_t grdxt_init( grdxt_t * rt,
...
      // allocates first level array
-     req.type  = KMEM_GENERIC;
-     req.size  = sizeof(void *) << ix1_width;
+     req.type  = KMEM_KCM;
+     req.order = ix1_width + ( (sizeof(void*) == 4) ? 2 : 3 );
      req.flags = AF_KERNEL | AF_ZERO;
      root = kmem_alloc( &req );
-     if( root == NULL ) return ENOMEM;
+
+     if( root == NULL )
+     {
+         printk("\n[ERROR] in %s : cannot allocate first level array\n", __FUNCTION__);
+         return -1;
+     }

      rt->root = root;
...
  In grdxt_destroy(), a new ix3 loop prints a warning for each non-empty level 3 slot, and the three kmem_free() requests now use "req.type = KMEM_KCM" without a size field:

+             for( ix3=0 ; ix3 < (uint32_t)(1 << w3) ; ix3++ )
+             {
+                 if( ptr3[ix3] != NULL )
+                 {
+                     printk("\n[WARNING] in %s : ptr3[%d][%d][%d] non empty\n",
+                     __FUNCTION__, ix1, ix2, ix3 );
+                 }
+             }
+
              // release level 3 array
+             req.type = KMEM_KCM;
              req.ptr  = ptr3;
-             req.type = KMEM_GENERIC;
-             req.size = sizeof(void *) * (1 << w3);
              kmem_free( &req );

  The grdxt_display() function is removed (replaced by grdxt_remote_display() below).

  In grdxt_insert() and grdxt_remove(), the traversal is restructured (ptr1, ptr2 and ptr3 are now obtained and tested for NULL one after the other), the missing level 2 / level 3 arrays are allocated with:

-         req.type = KMEM_GENERIC;
-         req.size = sizeof(void *) << w2;                          // resp. w3
+         req.type  = KMEM_KCM;
+         req.order = w2 + ( (sizeof(void*) == 4) ? 2 : 3 );        // resp. w3
          req.flags = AF_KERNEL | AF_ZERO;

  and grdxt_insert() now returns -1 instead of ENOMEM / EEXIST (the "slot must be empty" check is removed).

  A new "Remote access functions" section is added at the end of the file, containing grdxt_remote_insert(), grdxt_remote_remove(), grdxt_remote_lookup() (moved from the local section) and grdxt_remote_display(): they take an extended pointer on a possibly remote radix tree, traverse and update the three levels with the hal_remote_lpt() / hal_remote_spt() primitives, and grdxt_remote_insert() allocates the missing level 2 and level 3 arrays with kmem_remote_alloc() in the cluster containing the tree.
trunk/kernel/libk/grdxt.h
r626 r635 36 36 * Memory for the second and third levels arrays is dynamically allocated by the 37 37 * grdxt_insert() function and is only released by grdxt_destroy(). 38 * - This structure is entirely contained in one single cluster. 39 * - All modifications (insert / remove) must be done by a thread running in local cluster. 40 * - Lookup can be done by a thread running in any cluster (local or remote). 38 * This structure is entirely contained in one single cluster, but to allow any thread 39 * to access it, two sets of access functions are defined: 40 * - local threads can use access function using local pointers. 41 * - remote threads must use the access functions using extended pointers. 41 42 ****************************************************************************************** 42 43 * When it is used by the mapper implementing the file cache: … … 54 55 grdxt_t; 55 56 57 //////////////////////////////////////////////////////////////////////////////////////////// 58 // Local access functions 59 //////////////////////////////////////////////////////////////////////////////////////////// 60 56 61 /******************************************************************************************* 57 62 * This function initialises the radix-tree descriptor, 63 * It must be called by a local thread. 58 64 * and allocates memory for the first level array of pointers. 59 65 ******************************************************************************************* … … 71 77 /******************************************************************************************* 72 78 * This function releases all memory allocated to the radix-tree infrastructure. 73 * The radix-tree is supposed to be empty, but this is NOT checked by this function. 79 * It must be called by a local thread. 80 * A warning message is printed on the kernel TXT0 if the radix tree is not empty. 74 81 ******************************************************************************************* 75 82 * @ rt : pointer on the radix-tree descriptor. … … 79 86 /******************************************************************************************* 80 87 * This function insert a new item in the radix-tree. 88 * It must be called by a local thread. 81 89 * It dynamically allocates memory for new second and third level arrays if required. 82 90 ******************************************************************************************* … … 84 92 * @ key : key value. 85 93 * @ value : pointer on item to be registered in radix-tree. 86 * @ returns 0 if success / returns ENOMEM if no memory, or EINVAL ifillegal key.94 * @ returns 0 if success / returns -1 if no memory, or illegal key. 87 95 ******************************************************************************************/ 88 96 error_t grdxt_insert( grdxt_t * rt, … … 91 99 92 100 /******************************************************************************************* 93 * This function removes an item identified by its key, and returns a pointer 94 * on the removed item. No memory is released. 101 * This function removes an item identified by its key from the radix tree, 102 * It must be called by a local thread. 103 * and returns a pointer on the removed item. No memory is released. 95 104 ******************************************************************************************* 96 105 * @ rt : pointer on the radix-tree descriptor. 
… … 103 112 /******************************************************************************************* 104 113 * This function returns to a local client, a local pointer on the item identified 114 * It must be called by a local thread. 105 115 * by the <key> argument, from the radix tree identified by the <rt> local pointer. 106 116 ******************************************************************************************* … … 113 123 114 124 /******************************************************************************************* 115 * This function returns to a - possibly remote - remote client, an extended pointer116 * on the item identified by the <key> argument, from the radix tree identified by117 * the <rt_xp> remote pointer.118 *******************************************************************************************119 * @ rt_xp : extended pointer on the radix-tree descriptor.120 * @ key : key value.121 * @ returns an extended pointer on found item if success / returns XPTR_NULL if failure.122 ******************************************************************************************/123 xptr_t grdxt_remote_lookup( xptr_t rt_xp,124 uint32_t key );125 126 /*******************************************************************************************127 125 * This function scan all radix-tree entries in increasing key order, starting from 126 * It must be called by a local thread. 128 127 * the value defined by the <key> argument, and return a pointer on the first valid 129 128 * registered item, and the found item key value. … … 138 137 uint32_t * found_key ); 139 138 139 //////////////////////////////////////////////////////////////////////////////////////////// 140 // Remote access functions 141 //////////////////////////////////////////////////////////////////////////////////////////// 142 143 /******************************************************************************************* 144 * This function insert a new item in a - possibly remote - radix tree. 145 * It dynamically allocates memory for new second and third level arrays if required. 146 ******************************************************************************************* 147 * @ rt_xp : extended pointer on the radix-tree descriptor. 148 * @ key : key value. 149 * @ value : pointer on item to be registered in radix-tree. 150 * @ returns 0 if success / returns -1 if no memory, or illegal key. 151 ******************************************************************************************/ 152 error_t grdxt_remote_insert( xptr_t rt_xp, 153 uint32_t key, 154 void * value ); 155 156 /******************************************************************************************* 157 * This function removes an item identified by its key from a - possibly remote - radix 158 * tree, and returns a local pointer on the removed item. No memory is released. 159 ******************************************************************************************* 160 * @ rt_xp : pointer on the radix-tree descriptor. 161 * @ key : key value. 162 * @ returns local pointer on removed item if success / returns NULL if failure. 
163 ******************************************************************************************/ 164 void * grdxt_remote_remove( xptr_t rt_xp, 165 uint32_t key ); 166 167 /******************************************************************************************* 168 * This function returns to a - possibly remote - client, an extended pointer 169 * on the item identified by the <key> argument, from the radix tree identified by 170 * the <rt_xp> remote pointer. 171 ******************************************************************************************* 172 * @ rt_xp : extended pointer on the radix-tree descriptor. 173 * @ key : key value. 174 * @ returns an extended pointer on found item if success / returns XPTR_NULL if failure. 175 ******************************************************************************************/ 176 xptr_t grdxt_remote_lookup( xptr_t rt_xp, 177 uint32_t key ); 178 140 179 /******************************************************************************************* 141 180 * This function displays the current content of a possibly remote radix_tree. … … 144 183 * @ string : radix tree identifier. 145 184 ******************************************************************************************/ 146 void grdxt_display( xptr_t rt_xp, 147 char * string ); 148 185 void grdxt_remote_display( xptr_t rt_xp, 186 char * string ); 149 187 150 188 #endif /* _GRDXT_H_ */ -
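To make the split between the two function families concrete, here is a hedged usage sketch (kernel-style fragment, not compilable on its own; tree_cxy, key and item are placeholders, and only the prototypes listed above are used):

  // thread running in the cluster that owns the tree : local access functions
  grdxt_t * rt;                                          // local pointer on the tree (built by grdxt_init)
  error_t   error = grdxt_insert( rt , key , item );
  if( error ) printk("\n[ERROR] cannot insert item\n");

  // thread running in any cluster : remote access functions on extended pointers
  xptr_t rt_xp   = XPTR( tree_cxy , rt );
  error          = grdxt_remote_insert( rt_xp , key , item );
  xptr_t item_xp = grdxt_remote_lookup( rt_xp , key );   // XPTR_NULL if not found
  grdxt_remote_display( rt_xp , "my_tree" );             // debug dump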
trunk/kernel/libk/list.h
r632 r635 304 304 **************************************************************************/ 305 305 306 #define LIST_REMOTE_FIRST( cxy , root , type , member ) 307 ({ list_entry_t * __first = hal_remote_lpt( XPTR( cxy , &root->next ) );\308 LIST_ELEMENT( __first , type , member ); })306 #define LIST_REMOTE_FIRST( cxy , root , type , member ) \ 307 LIST_ELEMENT( hal_remote_lpt( XPTR( (cxy) , &(root)->next ) ), \ 308 type , member ) 309 309 310 310 /*************************************************************************** … … 314 314 * item(s) from the traversed list. 315 315 *************************************************************************** 316 * @ cxy : remote listcluster identifier316 * @ cxy : remote cluster identifier 317 317 * @ root : pointer on the root list_entry 318 318 * @ iter : pointer on the current list_entry -
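The rewritten macro is a single expression instead of a GCC statement-expression, so it can be used wherever an expression is expected. A hedged usage sketch (cxy and root are placeholders; page_t with its "list" member is used as the item type, as in user_dir.c):

  cxy_t          cxy;        // cluster containing the remote list
  list_entry_t * root;       // local pointer (valid in cluster cxy) on the list root

  // get a local pointer on the first page_t linked in the remote list
  page_t * first = LIST_REMOTE_FIRST( cxy , root , page_t , list );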
trunk/kernel/libk/remote_barrier.c
r632 r635 83 83 pthread_barrierattr_t * attr ) 84 84 { 85 xptr_t gen_barrier_xp; // extended pointer on generic barrier descriptor86 85 generic_barrier_t * gen_barrier_ptr; // local pointer on generic barrier descriptor 87 86 void * barrier; // local pointer on implementation barrier descriptor … … 97 96 98 97 // allocate memory for generic barrier descriptor 99 if( ref_cxy == local_cxy ) // reference cluster is local 100 { 101 req.type = KMEM_GEN_BARRIER; 102 req.flags = AF_ZERO; 103 gen_barrier_ptr = kmem_alloc( &req ); 104 gen_barrier_xp = XPTR( local_cxy , gen_barrier_ptr ); 105 } 106 else // reference cluster is remote 107 { 108 rpc_kcm_alloc_client( ref_cxy, 109 KMEM_GEN_BARRIER, 110 &gen_barrier_xp ); 111 gen_barrier_ptr = GET_PTR( gen_barrier_xp ); 112 } 98 req.type = KMEM_KCM; 99 req.order = bits_log2( sizeof(generic_barrier_t) ); 100 req.flags = AF_ZERO | AF_KERNEL; 101 gen_barrier_ptr = kmem_remote_alloc( ref_cxy , &req ); 113 102 114 103 if( gen_barrier_ptr == NULL ) … … 124 113 barrier = simple_barrier_create( count ); 125 114 126 if( barrier == NULL ) 127 { 128 printk("\n[ERROR] in %s : cannot create simple barrier\n", __FUNCTION__); 129 return -1; 130 } 115 if( barrier == NULL ) return -1; 131 116 } 132 117 else // QDT barrier implementation … … 147 132 barrier = dqt_barrier_create( x_size , y_size , nthreads ); 148 133 149 if( barrier == NULL ) 150 { 151 printk("\n[ERROR] in %s : cannot create DQT barrier descriptor\n", __FUNCTION__); 152 return -1; 153 } 134 if( barrier == NULL ) return -1; 154 135 } 155 136 … … 211 192 212 193 // release memory allocated to barrier descriptor 213 if( gen_barrier_cxy == local_cxy ) 214 { 215 req.type = KMEM_GEN_BARRIER; 216 req.ptr = gen_barrier_ptr; 217 kmem_free( &req ); 218 } 219 else 220 { 221 rpc_kcm_free_client( gen_barrier_cxy, 222 gen_barrier_ptr, 223 KMEM_GEN_BARRIER ); 224 } 194 req.type = KMEM_KCM; 195 req.ptr = gen_barrier_ptr; 196 kmem_remote_free( ref_cxy , &req ); 197 225 198 } // end generic_barrier_destroy() 226 199 … … 273 246 simple_barrier_t * simple_barrier_create( uint32_t count ) 274 247 { 275 xptr_t barrier_xp;248 kmem_req_t req; 276 249 simple_barrier_t * barrier; 277 250 … … 285 258 286 259 // allocate memory for simple barrier descriptor 287 if( ref_cxy == local_cxy ) // reference is local 288 { 289 kmem_req_t req; 290 req.type = KMEM_SMP_BARRIER; 291 req.flags = AF_ZERO; 292 barrier = kmem_alloc( &req ); 293 barrier_xp = XPTR( local_cxy , barrier ); 294 } 295 else // reference is remote 296 { 297 rpc_kcm_alloc_client( ref_cxy, 298 KMEM_SMP_BARRIER, 299 &barrier_xp ); 300 barrier = GET_PTR( barrier_xp ); 301 } 302 303 if( barrier == NULL ) return NULL; 260 req.type = KMEM_KCM; 261 req.order = bits_log2( sizeof(simple_barrier_t) ); 262 req.flags = AF_ZERO | AF_KERNEL; 263 barrier = kmem_remote_alloc( ref_cxy , &req ); 264 265 if( barrier == NULL ) 266 { 267 printk("\n[ERROR] in %s : cannot create simple barrier\n", __FUNCTION__ ); 268 return NULL; 269 } 304 270 305 271 // initialise simple barrier descriptor … … 325 291 void simple_barrier_destroy( xptr_t barrier_xp ) 326 292 { 293 kmem_req_t req; 294 327 295 // get barrier cluster and local pointer 328 296 cxy_t barrier_cxy = GET_CXY( barrier_xp ); … … 330 298 331 299 // release memory allocated for barrier descriptor 332 if( barrier_cxy == local_cxy ) 333 { 334 kmem_req_t req; 335 req.type = KMEM_SMP_BARRIER; 336 req.ptr = barrier_ptr; 337 kmem_free( &req ); 338 } 339 else 340 { 341 rpc_kcm_free_client( barrier_cxy, 342 barrier_ptr, 343 KMEM_SMP_BARRIER ); 344 } 
300 req.type = KMEM_KCM; 301 req.ptr = barrier_ptr; 302 kmem_remote_free( barrier_cxy , &req ); 345 303 346 304 #if DEBUG_BARRIER_DESTROY … … 498 456 499 457 #if DEBUG_BARRIER_CREATE 500 staticvoid dqt_barrier_display( xptr_t barrier_xp );458 void dqt_barrier_display( xptr_t barrier_xp ); 501 459 #endif 502 460 … … 506 464 uint32_t nthreads ) 507 465 { 508 xptr_t dqt_page_xp;509 page_t * rpc_page;510 xptr_t rpc_page_xp;511 466 dqt_barrier_t * barrier; // local pointer on DQT barrier descriptor 512 467 xptr_t barrier_xp; // extended pointer on DQT barrier descriptor 513 468 uint32_t z; // actual DQT size == max(x_size,y_size) 514 469 uint32_t levels; // actual number of DQT levels 515 xptr_t rpc_xp; // extended pointer on RPC descriptors array516 rpc_desc_t * rpc; // pointer on RPC descriptors array517 uint32_t responses; // responses counter for parallel RPCs518 reg_t save_sr; // for critical section519 470 uint32_t x; // X coordinate in QDT mesh 520 471 uint32_t y; // Y coordinate in QDT mesh … … 522 473 kmem_req_t req; // kmem request 523 474 524 // compute size and number of DQT levels475 // compute number of DQT levels, depending on the mesh size 525 476 z = (x_size > y_size) ? x_size : y_size; 526 477 levels = (z < 2) ? 1 : (z < 3) ? 2 : (z < 5) ? 3 : (z < 9) ? 4 : 5; … … 529 480 assert( (z <= 16) , "DQT mesh size larger than (16*16)\n"); 530 481 531 // check RPC descriptor size532 assert( (sizeof(rpc_desc_t) <= 128), "RPC descriptor larger than 128 bytes\n");533 534 482 // check size of an array of 5 DQT nodes 535 483 assert( (sizeof(dqt_node_t) * 5 <= 512 ), "array of DQT nodes larger than 512 bytes\n"); … … 538 486 assert( (sizeof(dqt_barrier_t) <= 0x4000 ), "DQT barrier descriptor larger than 4 pages\n"); 539 487 540 // get pointer on local client process descriptor488 // get pointer on client thread and process descriptors 541 489 thread_t * this = CURRENT_THREAD; 542 490 process_t * process = this->process; … … 553 501 cxy_t ref_cxy = GET_CXY( ref_xp ); 554 502 555 // 1. allocate 4 4 Kbytes pages for DQT barrier descriptor in reference cluster 556 dqt_page_xp = ppm_remote_alloc_pages( ref_cxy , 2 ); 557 558 if( dqt_page_xp == XPTR_NULL ) return NULL; 559 560 // get pointers on DQT barrier descriptor 561 barrier_xp = ppm_page2base( dqt_page_xp ); 562 barrier = GET_PTR( barrier_xp ); 503 // 1. allocate 4 small pages for the DQT barrier descriptor in reference cluster 504 req.type = KMEM_PPM; 505 req.order = 2; // 4 small pages == 16 Kbytes 506 req.flags = AF_ZERO | AF_KERNEL; 507 barrier = kmem_remote_alloc( ref_cxy , &req ); 508 509 if( barrier == NULL ) 510 { 511 printk("\n[ERROR] in %s : cannot create DQT barrier\n", __FUNCTION__ ); 512 return NULL; 513 } 514 515 // get pointers on DQT barrier descriptor in reference cluster 516 barrier_xp = XPTR( ref_cxy , barrier ); 563 517 564 518 // initialize global parameters in DQT barrier descriptor … … 569 523 #if DEBUG_BARRIER_CREATE 570 524 if( cycle > DEBUG_BARRIER_CREATE ) 571 printk("\n[%s] thread[%x,%x] created DQT barrier descriptor at(%x,%x)\n",525 printk("\n[%s] thread[%x,%x] created DQT barrier descriptor(%x,%x)\n", 572 526 __FUNCTION__, process->pid, this->trdid, ref_cxy, barrier ); 573 527 #endif 574 528 575 // 2. 
allocate memory from local cluster for an array of 256 RPCs descriptors 576 // cannot share the RPC descriptor, because the returned argument is not shared 577 req.type = KMEM_PAGE; 578 req.size = 3; // 8 pages == 32 Kbytes 579 req.flags = AF_ZERO; 580 rpc_page = kmem_alloc( &req ); 581 rpc_page_xp = XPTR( local_cxy , rpc_page ); 582 583 // get pointers on RPC descriptors array 584 rpc_xp = ppm_page2base( rpc_page_xp ); 585 rpc = GET_PTR( rpc_xp ); 586 587 #if DEBUG_BARRIER_CREATE 588 if( cycle > DEBUG_BARRIER_CREATE ) 589 printk("\n[%s] thread[%x,%x] created RPC descriptors array at (%x,%s)\n", 590 __FUNCTION__, process->pid, this->trdid, local_cxy, rpc ); 591 #endif 592 593 // 3. send parallel RPCs to all existing clusters covered by the DQT 594 // to allocate memory for an array of 5 DQT nodes in each cluster 529 // 2. allocate memory for an array of 5 DQT nodes 530 // in all existing clusters covered by the DQDT 595 531 // (5 nodes per cluster <= 512 bytes per cluster) 596 597 responses = 0; // initialize RPC responses counter 598 599 // mask IRQs 600 hal_disable_irq( &save_sr); 601 602 // client thread blocks itself 603 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); 604 532 // and complete barrier descriptor initialisation. 605 533 for ( x = 0 ; x < x_size ; x++ ) 606 534 { 607 535 for ( y = 0 ; y < y_size ; y++ ) 608 536 { 609 // send RPC to existing clusters only 537 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 538 xptr_t local_array_xp; // xptr of nodes array in cluster cxy 539 540 // allocate memory in existing clusters only 610 541 if( LOCAL_CLUSTER->cluster_info[x][y] ) 611 542 { 612 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 613 614 // build a specific RPC descriptor for each target cluster 615 rpc[cxy].rsp = &responses; 616 rpc[cxy].blocking = false; 617 rpc[cxy].index = RPC_KCM_ALLOC; 618 rpc[cxy].thread = this; 619 rpc[cxy].lid = this->core->lid; 620 rpc[cxy].args[0] = (uint64_t)KMEM_512_BYTES; 621 622 // atomically increment expected responses counter 623 hal_atomic_add( &responses , 1 ); 624 625 // send a non-blocking RPC to allocate 512 bytes in target cluster 626 rpc_send( cxy , &rpc[cxy] ); 627 } 628 } 629 } 630 631 #if DEBUG_BARRIER_CREATE 632 if( cycle > DEBUG_BARRIER_CREATE ) 633 printk("\n[%s] thread[%x,%x] sent all RPC requests to allocate dqt_nodes array\n", 634 __FUNCTION__, process->pid, this->trdid ); 635 #endif 636 637 // client thread deschedule 638 sched_yield("blocked on parallel rpc_kcm_alloc"); 639 640 // restore IRQs 641 hal_restore_irq( save_sr); 642 643 // 4. 
initialize the node_xp[x][y][l] array in DQT barrier descriptor 644 // the node_xp[x][y][0] value is available in rpc.args[1] 645 646 #if DEBUG_BARRIER_CREATE 647 if( cycle > DEBUG_BARRIER_CREATE ) 648 printk("\n[%s] thread[%x,%x] initialises array of pointers on dqt_nodes\n", 649 __FUNCTION__, process->pid, this->trdid ); 650 #endif 651 652 for ( x = 0 ; x < x_size ; x++ ) 653 { 654 for ( y = 0 ; y < y_size ; y++ ) 655 { 656 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); // target cluster identifier 657 xptr_t array_xp = (xptr_t)rpc[cxy].args[1]; // x_pointer on node array 658 uint32_t offset = sizeof( dqt_node_t ); // size of a DQT node 659 660 // set values into the node_xp[x][y][l] array 661 for ( l = 0 ; l < levels ; l++ ) 662 { 663 xptr_t node_xp = array_xp + (offset * l); 664 hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), node_xp ); 665 666 #if DEBUG_BARRIER_CREATE 543 req.type = KMEM_KCM; 544 req.order = 9; // 512 bytes 545 req.flags = AF_ZERO | AF_KERNEL; 546 547 void * ptr = kmem_remote_alloc( cxy , &req ); 548 549 if( ptr == NULL ) 550 { 551 printk("\n[ERROR] in %s : cannot allocate DQT in cluster %x\n", 552 __FUNCTION__, cxy ); 553 return NULL; 554 } 555 556 // build extended pointer on local node array in cluster cxy 557 local_array_xp = XPTR( cxy , ptr ); 558 559 // initialize the node_xp[x][y][l] array in barrier descriptor 560 for ( l = 0 ; l < levels ; l++ ) 561 { 562 xptr_t node_xp = local_array_xp + ( l * sizeof(dqt_node_t) ); 563 hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), node_xp ); 564 565 #if (DEBUG_BARRIER_CREATE & 1) 667 566 if( cycle > DEBUG_BARRIER_CREATE ) 668 567 printk(" - dqt_node_xp[%d,%d,%d] = (%x,%x) / &dqt_node_xp = %x\n", 669 568 x , y , l , GET_CXY( node_xp ), GET_PTR( node_xp ), &barrier->node_xp[x][y][l] ); 670 569 #endif 570 } 671 571 } 672 } 673 } 674 675 // 5. release memory locally allocated for the RPCs array 676 req.type = KMEM_PAGE; 677 req.ptr = rpc_page; 678 kmem_free( &req ); 572 else // register XPTR_NULL for all non-existing entries 573 { 574 for ( l = 0 ; l < levels ; l++ ) 575 { 576 hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), XPTR_NULL ); 577 } 578 } 579 } // end for y 580 } // end for x 679 581 680 582 #if DEBUG_BARRIER_CREATE 681 583 if( cycle > DEBUG_BARRIER_CREATE ) 682 printk("\n[%s] thread[%x,%x] released memory for RPC descriptors array\n",584 printk("\n[%s] thread[%x,%x] initialized array of pointers in DQT barrier\n", 683 585 __FUNCTION__, process->pid, this->trdid ); 684 586 #endif 685 587 686 // 6. initialise all distributed DQT nodes using remote accesses588 // 3. initialise all distributed DQT nodes using remote accesses 687 589 // and the pointers stored in the node_xp[x][y][l] array 688 590 for ( x = 0 ; x < x_size ; x++ ) … … 827 729 void dqt_barrier_destroy( xptr_t barrier_xp ) 828 730 { 829 page_t * rpc_page;830 xptr_t rpc_page_xp;831 rpc_desc_t * rpc; // local pointer on RPC descriptors array832 xptr_t rpc_xp; // extended pointer on RPC descriptor array833 reg_t save_sr; // for critical section834 731 kmem_req_t req; // kmem request 835 836 thread_t * this = CURRENT_THREAD; 732 uint32_t x; 733 uint32_t y; 734 837 735 838 736 // get DQT barrier descriptor cluster and local pointer … … 841 739 842 740 #if DEBUG_BARRIER_DESTROY 741 thread_t * this = CURRENT_THREAD; 843 742 uint32_t cycle = (uint32_t)hal_get_cycles(); 844 743 if( cycle > DEBUG_BARRIER_DESTROY ) … … 851 750 uint32_t y_size = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->y_size ) ); 852 751 853 // 1. 
allocate memory from local cluster for an array of 256 RPCs descriptors 854 // cannot share the RPC descriptor, because the "buf" argument is not shared 855 req.type = KMEM_PAGE; 856 req.size = 3; // 8 pages == 32 Kbytes 857 req.flags = AF_ZERO; 858 rpc_page = kmem_alloc( &req ); 859 rpc_page_xp = XPTR( local_cxy , rpc_page ); 860 861 // get pointers on RPC descriptors array 862 rpc_xp = ppm_page2base( rpc_page_xp ); 863 rpc = GET_PTR( rpc_xp ); 864 865 // 2. send parallel RPCs to all existing clusters covered by the DQT 866 // to release memory allocated for the arrays of DQT nodes in each cluster 867 868 uint32_t responses = 0; // initialize RPC responses counter 869 870 // mask IRQs 871 hal_disable_irq( &save_sr); 872 873 // client thread blocks itself 874 thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC ); 875 876 uint32_t x , y; 877 878 #if DEBUG_BARRIER_DESTROY 879 if( cycle > DEBUG_BARRIER_DESTROY ) 880 printk("\n[%s] thread[%x,%x] send RPCs to release the distributed dqt_node array\n", 881 __FUNCTION__, this->process->pid, this->trdid ); 882 #endif 883 752 // 1. release memory allocated for the DQT nodes 753 // in all clusters covered by the QDT mesh 884 754 for ( x = 0 ; x < x_size ; x++ ) 885 755 { 886 756 for ( y = 0 ; y < y_size ; y++ ) 887 757 { 888 // send RPC to existing cluster only 758 // compute target cluster identifier 759 cxy_t cxy = HAL_CXY_FROM_XY( x , y ); 760 761 // existing cluster only 889 762 if( LOCAL_CLUSTER->cluster_info[x][y] ) 890 763 { 891 // compute target cluster identifier892 cxy_t cxy = HAL_CXY_FROM_XY( x , y );893 894 764 // get local pointer on dqt_nodes array in target cluster 895 765 xptr_t buf_xp_xp = XPTR( barrier_cxy , &barrier_ptr->node_xp[x][y][0] ); … … 899 769 assert( (cxy == GET_CXY(buf_xp)) , "bad extended pointer on dqt_nodes array\n" ); 900 770 901 // build a specific RPC descriptor 902 rpc[cxy].rsp = &responses; 903 rpc[cxy].blocking = false; 904 rpc[cxy].index = RPC_KCM_FREE; 905 rpc[cxy].thread = this; 906 rpc[cxy].lid = this->core->lid; 907 rpc[cxy].args[0] = (uint64_t)(intptr_t)buf; 908 rpc[cxy].args[1] = (uint64_t)KMEM_512_BYTES; 909 910 // atomically increment expected responses counter 911 hal_atomic_add( &responses , 1 ); 912 771 req.type = KMEM_KCM; 772 req.ptr = buf; 773 kmem_remote_free( cxy , &req ); 774 913 775 #if DEBUG_BARRIER_DESTROY 776 thread_t * this = CURRENT_THREAD; 777 uint32_t cycle = (uint32_t)hal_get_cycles(); 914 778 if( cycle > DEBUG_BARRIER_DESTROY ) 915 printk(" - target cluster(%d,%d) / buffer %x\n", x, y, buf ); 916 #endif 917 // send a non-blocking RPC to release 512 bytes in target cluster 918 rpc_send( cxy , &rpc[cxy] ); 779 printk("\n[%s] thread[%x,%x] released node array %x in cluster %x / cycle %d\n", 780 __FUNCTION__, this->process->pid, this->trdid, buf, cxy, cycle ); 781 #endif 919 782 } 920 783 } 921 784 } 922 785 923 // client thread deschedule 924 sched_yield("blocked on parallel rpc_kcm_free"); 925 926 // restore IRQs 927 hal_restore_irq( save_sr); 928 929 // 3. release memory locally allocated for the RPC descriptors array 930 req.type = KMEM_PAGE; 931 req.ptr = rpc_page; 932 kmem_free( &req ); 933 934 // 4. release memory allocated for barrier descriptor 935 xptr_t page_xp = ppm_base2page( barrier_xp ); 936 cxy_t page_cxy = GET_CXY( page_xp ); 937 page_t * page_ptr = GET_PTR( page_xp ); 938 939 ppm_remote_free_pages( page_cxy , page_ptr ); 786 // 2. 
release memory allocated for barrier descriptor in ref cluster 787 req.type = KMEM_PPM; 788 req.ptr = barrier_ptr; 789 kmem_remote_free( barrier_cxy , &req ); 940 790 941 791 #if DEBUG_BARRIER_DESTROY 942 792 cycle = (uint32_t)hal_get_cycles(); 943 793 if( cycle > DEBUG_BARRIER_DESTROY ) 944 printk("\n[%s] thread[%x,%x] exit for barrier (%x,%x) / cycle %d\n",794 printk("\n[%s] thread[%x,%x] release barrier descriptor (%x,%x) / cycle %d\n", 945 795 __FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle ); 946 796 #endif … … 1022 872 { 1023 873 uint32_t level = hal_remote_l32( XPTR( node_cxy , &node_ptr->level )); 1024 uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity ));1025 uint32_t count = hal_remote_l32( XPTR( node_cxy , &node_ptr->current ));1026 874 xptr_t pa_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->parent_xp )); 1027 875 xptr_t c0_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[0] )); … … 1030 878 xptr_t c3_xp = hal_remote_l32( XPTR( node_cxy , &node_ptr->child_xp[3] )); 1031 879 1032 printk(" . level %d : (%x,%x) / %d on %d /P(%x,%x) / C0(%x,%x)"880 printk(" . level %d : (%x,%x) / P(%x,%x) / C0(%x,%x)" 1033 881 " C1(%x,%x) / C2(%x,%x) / C3(%x,%x)\n", 1034 level, node_cxy, node_ptr, count, arity,882 level, node_cxy, node_ptr, 1035 883 GET_CXY(pa_xp), GET_PTR(pa_xp), 1036 884 GET_CXY(c0_xp), GET_PTR(c0_xp), -
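The change that repeats through this file (and the following ones) replaces the RPC-based remote allocation by direct calls to the kmem remote interface. A condensed sketch of the new idiom, with my_type_t standing for any small kernel object (hypothetical type, shown only to fix the pattern):

  kmem_req_t  req;
  my_type_t * ptr;

  // allocate a zeroed object in cluster cxy (KCM is used for objects smaller than one page)
  req.type  = KMEM_KCM;
  req.order = bits_log2( sizeof(my_type_t) );   // block size expressed as a power of 2
  req.flags = AF_ZERO | AF_KERNEL;
  ptr = kmem_remote_alloc( cxy , &req );
  if( ptr == NULL ) return -1;                  // allocation failure

  // ... use the object, through remote accesses when cxy is not the local cluster ...

  // release the object to the same cluster
  req.type = KMEM_KCM;
  req.ptr  = ptr;
  kmem_remote_free( cxy , &req );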
trunk/kernel/libk/remote_condvar.c
r581 r635 2 2 * remote_condvar.c - remote kernel condition variable implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 86 86 { 87 87 remote_condvar_t * condvar_ptr; 88 xptr_t condvar_xp;88 kmem_req_t req; 89 89 90 90 // get pointer on local process descriptor … … 98 98 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 99 99 100 // allocate memory for new condvar in reference cluster 101 if( ref_cxy == local_cxy ) // local cluster is the reference 102 { 103 kmem_req_t req; 104 req.type = KMEM_CONDVAR; 105 req.flags = AF_ZERO; 106 condvar_ptr = kmem_alloc( &req ); 107 condvar_xp = XPTR( local_cxy , condvar_ptr ); 108 } 109 else // reference cluster is remote 110 { 111 rpc_kcm_alloc_client( ref_cxy , KMEM_CONDVAR , &condvar_xp ); 112 condvar_ptr = GET_PTR( condvar_xp ); 113 } 114 115 if( condvar_xp == XPTR_NULL ) return 0xFFFFFFFF; 100 req.type = KMEM_KCM; 101 req.order = bits_log2( sizeof(remote_condvar_t) ); 102 req.flags = AF_ZERO | AF_KERNEL; 103 condvar_ptr = kmem_alloc( &req ); 104 105 if( condvar_ptr == NULL ) 106 { 107 printk("\n[ERROR] in %s : cannot create condvar\n", __FUNCTION__ ); 108 return -1; 109 } 116 110 117 111 // initialise condvar … … 136 130 void remote_condvar_destroy( xptr_t condvar_xp ) 137 131 { 132 kmem_req_t req; 133 138 134 // get pointer on local process descriptor 139 135 process_t * process = CURRENT_THREAD->process; … … 166 162 167 163 // release memory allocated for condvar descriptor 168 if( condvar_cxy == local_cxy ) // reference is local 169 { 170 kmem_req_t req; 171 req.type = KMEM_SEM; 172 req.ptr = condvar_ptr; 173 kmem_free( &req ); 174 } 175 else // reference is remote 176 { 177 rpc_kcm_free_client( condvar_cxy , condvar_ptr , KMEM_CONDVAR ); 178 } 164 req.type = KMEM_KCM; 165 req.ptr = condvar_ptr; 166 kmem_remote_free( ref_cxy , &req ); 179 167 180 168 } // end remote_convar_destroy() -
trunk/kernel/libk/remote_condvar.h
r581 r635 2 2 * remote_condvar.h: POSIX condition variable definition. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 78 78 * This function implements the CONVAR_INIT operation. 79 79 * This function creates and initializes a remote_condvar, identified by its virtual 80 * address <vaddr> in the client process reference cluster, using RPC if required.80 * address <vaddr> in the client process reference cluster, using remote access. 81 81 * It registers this user condvar in the reference process descriptor. 82 82 ******************************************************************************************* -
trunk/kernel/libk/remote_mutex.c
r619 r635 2 2 * remote_mutex.c - POSIX mutex implementation. 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 84 84 error_t remote_mutex_create( intptr_t ident ) 85 85 { 86 xptr_t mutex_xp;87 86 remote_mutex_t * mutex_ptr; 87 kmem_req_t req; 88 88 89 89 // get pointer on local process descriptor … … 97 97 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 98 98 99 // allocate memory for mutex descriptor 100 if( ref_cxy == local_cxy ) // local cluster is the reference 101 { 102 kmem_req_t req; 103 req.type = KMEM_MUTEX; 104 req.flags = AF_ZERO; 105 mutex_ptr = kmem_alloc( &req ); 106 mutex_xp = XPTR( local_cxy , mutex_ptr ); 107 } 108 else // reference is remote 109 { 110 rpc_kcm_alloc_client( ref_cxy , KMEM_MUTEX , &mutex_xp ); 111 mutex_ptr = GET_PTR( mutex_xp ); 112 } 113 114 if( mutex_ptr == NULL ) return 0xFFFFFFFF; 99 // allocate memory for mutex descriptor in reference cluster 100 req.type = KMEM_KCM; 101 req.order = bits_log2( sizeof(remote_mutex_t) ); 102 req.flags = AF_ZERO | AF_KERNEL; 103 mutex_ptr = kmem_remote_alloc( ref_cxy , &req ); 104 105 if( mutex_ptr == NULL ) 106 { 107 printk("\n[ERROR] in %s : cannot create mutex\n", __FUNCTION__); 108 return -1; 109 } 115 110 116 111 // initialise mutex … … 150 145 void remote_mutex_destroy( xptr_t mutex_xp ) 151 146 { 147 kmem_req_t req; 148 152 149 // get pointer on local process descriptor 153 150 process_t * process = CURRENT_THREAD->process; … … 174 171 175 172 // release memory allocated for mutex descriptor 176 if( mutex_cxy == local_cxy ) // reference is local 177 { 178 kmem_req_t req; 179 req.type = KMEM_MUTEX; 180 req.ptr = mutex_ptr; 181 kmem_free( &req ); 182 } 183 else // reference is remote 184 { 185 rpc_kcm_free_client( mutex_cxy , mutex_ptr , KMEM_MUTEX ); 186 } 173 req.type = KMEM_KCM; 174 req.ptr = mutex_ptr; 175 kmem_remote_free( mutex_cxy , &req ); 187 176 188 177 } // end remote_mutex_destroy() -
trunk/kernel/libk/remote_sem.c
r563 r635 2 2 * remote_sem.c - POSIX unnamed semaphore implementation. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 )4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 86 86 uint32_t value ) 87 87 { 88 kmem_req_t req; 88 89 remote_sem_t * sem_ptr; 89 xptr_t sem_xp;90 90 91 91 // get pointer on local process descriptor … … 100 100 101 101 // allocate memory for new semaphore in reference cluster 102 if( ref_cxy == local_cxy ) // local cluster is the reference 103 { 104 kmem_req_t req; 105 req.type = KMEM_SEM; 106 req.flags = AF_ZERO; 107 sem_ptr = kmem_alloc( &req ); 108 sem_xp = XPTR( local_cxy , sem_ptr ); 102 req.type = KMEM_KCM; 103 req.order = bits_log2( sizeof(remote_sem_t) ); 104 req.flags = AF_ZERO | AF_KERNEL; 105 sem_ptr = kmem_remote_alloc( ref_cxy, &req ); 106 107 if( sem_ptr == NULL ) 108 { 109 printk("\n[ERROR] in %s : cannot create semaphore\n", __FUNCTION__ ); 110 return -1; 109 111 } 110 else // reference is remote111 {112 rpc_kcm_alloc_client( ref_cxy , KMEM_SEM , &sem_xp );113 sem_ptr = GET_PTR( sem_xp );114 }115 116 if( sem_xp == XPTR_NULL ) return 0xFFFFFFFF;117 112 118 113 // initialise semaphore … … 149 144 void remote_sem_destroy( xptr_t sem_xp ) 150 145 { 146 kmem_req_t req; 147 151 148 // get pointer on local process descriptor 152 149 process_t * process = CURRENT_THREAD->process; … … 179 176 180 177 // release memory allocated for semaphore descriptor 181 if( sem_cxy == local_cxy ) // reference is local 182 { 183 kmem_req_t req; 184 req.type = KMEM_SEM; 185 req.ptr = sem_ptr; 186 kmem_free( &req ); 187 } 188 else // reference is remote 189 { 190 rpc_kcm_free_client( sem_cxy , sem_ptr , KMEM_SEM ); 191 } 178 req.type = KMEM_KCM; 179 req.ptr = sem_ptr; 180 kmem_remote_free( sem_cxy , &req ); 192 181 193 182 } // end remote_sem_destroy() -
trunk/kernel/libk/user_dir.c
r633 r635 93 93 uint32_t attr; // attributes for all GPT entries 94 94 uint32_t dirents_per_page; // number of dirent descriptors per page 95 xptr_t page_xp; // extended pointer on page descriptor96 95 page_t * page; // local pointer on page descriptor 97 xptr_t base_xp; // extended pointer on physical page base98 96 struct dirent * base; // local pointer on physical page base 99 97 uint32_t total_dirents; // total number of dirents in dirent array … … 126 124 127 125 // check dirent size 128 assert( ( sizeof(struct dirent) == 64), "sizeof(dirent) !=64\n");126 assert( ( sizeof(struct dirent) == 64), "sizeof(dirent) must be 64\n"); 129 127 130 128 // compute number of dirent per page … … 135 133 136 134 // allocate memory for a local user_dir descriptor 137 req.type = KMEM_DIR; 138 req.flags = AF_ZERO; 135 req.type = KMEM_KCM; 136 req.order = bits_log2( sizeof(user_dir_t) ); 137 req.flags = AF_ZERO | AF_KERNEL; 139 138 dir = kmem_alloc( &req ); 140 139 … … 146 145 } 147 146 148 // Build an initialize the dirent array as a list of p hysical pages.147 // Build an initialize the dirent array as a list of pages. 149 148 // For each iteration in this while loop: 150 149 // - allocate one physical 4 Kbytes (64 dirent slots) … … 163 162 { 164 163 // allocate one physical page 165 req.type = KMEM_P AGE;166 req. size= 0;164 req.type = KMEM_PPM; 165 req.order = 0; 167 166 req.flags = AF_ZERO; 168 page = kmem_alloc( &req );169 170 if( page == NULL )167 base = kmem_alloc( &req ); 168 169 if( base == NULL ) 171 170 { 172 171 printk("\n[ERROR] in %s : cannot allocate page in cluster %x\n", … … 174 173 goto user_dir_create_failure; 175 174 } 176 177 // get pointer on page base (array of dirents)178 page_xp = XPTR( local_cxy , page );179 base_xp = ppm_page2base( page_xp );180 base = GET_PTR( base_xp );181 175 182 176 // call the relevant FS specific function to copy up to 64 dirents in page … … 198 192 total_dirents += entries; 199 193 194 // get page descriptor pointer from base 195 page = GET_PTR( ppm_base2page( XPTR( local_cxy , base ) ) ); 196 200 197 // register page in temporary list 201 198 list_add_last( &root , &page->list ); … … 303 300 304 301 // release the user_dir descriptor 305 req.type = KMEM_ DIR;302 req.type = KMEM_KCM; 306 303 req.ptr = dir; 307 304 kmem_free( &req ); … … 364 361 365 362 // release local user_dir_t structure 366 req.type = KMEM_ DIR;363 req.type = KMEM_KCM; 367 364 req.ptr = dir; 368 365 kmem_free( &req ); … … 372 369 { 373 370 page = LIST_FIRST( &root , page_t , list ); 374 req.type = KMEM_PAGE; 375 req.ptr = page; 371 372 // get base from page descriptor pointer 373 base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) ); 374 375 req.type = KMEM_PPM; 376 req.ptr = base; 376 377 kmem_free( &req ); 377 378 } … … 492 493 // release local user_dir_t structure 493 494 kmem_req_t req; 494 req.type = KMEM_ DIR;495 req.type = KMEM_KCM; 495 496 req.ptr = dir; 496 497 kmem_free( &req ); -
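With the KMEM_PPM request type the allocator returns the page base address rather than the page descriptor, so the two conversions used above become the standard way to move between them. A hedged sketch of the pattern, as it appears in the new user_dir.c code:

  kmem_req_t req;
  void     * base;    // base address of the allocated physical page(s)
  page_t   * page;    // associated page descriptor

  // allocate one zeroed 4 Kbytes page in the local cluster
  req.type  = KMEM_PPM;
  req.order = 0;
  req.flags = AF_ZERO;
  base = kmem_alloc( &req );
  if( base == NULL ) return -1;     // allocation failure

  // base -> page descriptor (e.g. to register the page in a local list)
  page = GET_PTR( ppm_base2page( XPTR( local_cxy , base ) ) );

  // page descriptor -> base (e.g. before releasing the page)
  base = GET_PTR( ppm_page2base( XPTR( local_cxy , page ) ) );

  // release the page
  req.type = KMEM_PPM;
  req.ptr  = base;
  kmem_free( &req );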
trunk/kernel/libk/user_dir.h
r629 r635 78 78 * This function allocates memory and initializes a user_dir_t structure in the cluster 79 79 * containing the directory inode identified by the <inode> argument and maps the 80 * user accessible dirent array in the reference user process VMM, identified by the 80 * user accessible dirent array in the reference user process VSL, identified by the 81 81 * <ref_xp> argument. 82 82 * It must be executed by a thread running in the cluster containing the target inode. -

trunk/kernel/libk/xhtab.c
r614 r635 2 2 * xhtab.c - Remote access embedded hash table implementation. 3 3 * 4 * Author Alain Greiner (2016,2017)4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 134 134 uint32_t i; 135 135 136 // initialize readlock136 // initialize lock 137 137 remote_busylock_init( XPTR( local_cxy , &xhtab->lock), LOCK_XHTAB_STATE ); 138 138 … … 153 153 } 154 154 155 for( i=0 ; i < XHASHTAB_SIZE ; i++ ) 156 { 157 xlist_root_init( XPTR( local_cxy , &xhtab->roots[i] ) ); 158 } 159 160 #if DEBUG_XHTAB 161 printk("\n@@@ %s for xhtab (%x,%x)\n" 155 #if DEBUG_XHTAB 156 printk("\n[%s] for xhtab (%x,%x)\n" 162 157 " - index_from_key = %x (@ %x)\n" 163 158 " - item_match_key = %x (@ %x)\n" … … 169 164 #endif 170 165 166 for( i=0 ; i < XHASHTAB_SIZE ; i++ ) 167 { 168 xlist_root_init( XPTR( local_cxy , &xhtab->roots[i] ) ); 169 170 #if (DEBUG_XHTAB & 1) 171 printk("\n - initialize root[%d] / %x\n", i , &xhtab->roots[i] ); 172 #endif 173 174 } 175 171 176 } // end xhtab_init() 172 177 173 ////////////////////////////////////// 174 xptr_t xhtab_scan( xptr_t xhtab_xp, 175 uint32_t index, 176 void * key ) 178 ///////////////////////////////////////////////////////////////////////////////////////////// 179 // This static function traverse the subset identified by the <index> argument 180 // to find an item identified by the <key> argument. 181 ///////////////////////////////////////////////////////////////////////////////////////////// 182 // @ xhtab_xp : extended pointer on the xhtab descriptor. 183 // @ index : subset index. 184 // @ key : searched key value. 185 // @ return extended pointer on the found item if success / return XPTR_NULL if not found. 186 ///////////////////////////////////////////////////////////////////////////////////////////// 187 static xptr_t xhtab_scan( xptr_t xhtab_xp, 188 uint32_t index, 189 void * key ) 177 190 { 178 191 xptr_t xlist_xp; // xlist_entry_t (iterator) … … 220 233 index_from_key_t * index_from_key; // function pointer 221 234 222 #if DEBUG_XHTAB 223 printk("\n[%s] enter / key %s\n", __FUNCTION__, key ); 224 #endif 225 226 // get xhtab cluster and local pointer 227 xhtab_cxy = GET_CXY( xhtab_xp ); 228 xhtab_ptr = GET_PTR( xhtab_xp ); 235 // get xhtab cluster and local pointer 236 xhtab_cxy = GET_CXY( xhtab_xp ); 237 xhtab_ptr = GET_PTR( xhtab_xp ); 238 239 #if DEBUG_XHTAB 240 printk("\n[%s] enter / xhtab (%x,%x) / key = <%s> / cycle %d\n", 241 __FUNCTION__, xhtab_cxy, xhtab_ptr, key, (uint32_t)hal_get_cycles() ); 242 #endif 243 244 // build extended pointer on xhtab lock 245 xptr_t lock_xp = XPTR( xhtab_cxy , &xhtab_ptr->lock ); 229 246 230 247 // get pointer on "index_from_key" function 231 248 index_from_key = (index_from_key_t *)hal_remote_lpt( XPTR( xhtab_cxy , 232 249 &xhtab_ptr->index_from_key ) ); 250 #if DEBUG_XHTAB 251 printk("\n[%s] remote = %x / direct = %x / @ = %x\n", 252 __FUNCTION__, index_from_key, xhtab_ptr->index_from_key, &xhtab_ptr->index_from_key ); 253 #endif 254 233 255 // compute index from key 234 256 index = index_from_key( key ); 235 257 258 #if DEBUG_XHTAB 259 printk("\n[%s] index = %x\n", __FUNCTION__, index ); 260 #endif 261 236 262 // take the lock protecting hash table 237 remote_busylock_acquire( XPTR( xhtab_cxy , &xhtab_ptr->lock ));238 239 // search a matching item 263 remote_busylock_acquire( lock_xp ); 264 265 // search a matching item in subset 240 266 item_xp = xhtab_scan( xhtab_xp , index , key ); 241 267 242 if( item_xp != XPTR_NULL ) // error if found268 if( item_xp != XPTR_NULL 
) // error if item already registered 243 269 { 244 270 // release the lock protecting hash table 245 remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ));271 remote_busylock_release( lock_xp ); 246 272 247 273 return -1; … … 256 282 257 283 // release the lock protecting hash table 258 remote_busylock_release( XPTR( xhtab_cxy , &xhtab_ptr->lock ));284 remote_busylock_release( lock_xp ); 259 285 260 286 #if DEBUG_XHTAB 261 printk("\n[%s] success / %s\n", __FUNCTION__, key );287 printk("\n[%s] success / <%s>\n", __FUNCTION__, key ); 262 288 #endif 263 289 -
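The insert path above (hash the key to a subset index, scan that subset under the lock protecting the table, refuse duplicate keys, otherwise link the item) can be modelled by a few lines of standalone C; the names, hash function and bucket count below are illustrative only, not the kernel's:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define NB_BUCKETS 8

  typedef struct item_s
  {
      const char    * key;     // the item is identified by its name
      struct item_s * next;    // linkage in the subset
  }
  item_t;

  static item_t * bucket[NB_BUCKETS];     // one list per subset

  static uint32_t index_from_key( const char * key )    // same role as the xhtab hook
  {
      uint32_t h = 0;
      while( *key ) h = (h << 5) + h + (uint8_t)(*key++);
      return h % NB_BUCKETS;
  }

  static int htab_insert( item_t * item )
  {
      uint32_t index = index_from_key( item->key );

      // scan the subset : an already registered key is an error
      for( item_t * p = bucket[index] ; p != NULL ; p = p->next )
      {
          if( strcmp( p->key , item->key ) == 0 ) return -1;
      }

      // register the new item at the head of the subset
      item->next    = bucket[index];
      bucket[index] = item;
      return 0;
  }

  int main( void )
  {
      static item_t a = { "foo" , NULL };
      static item_t b = { "foo" , NULL };

      printf("first  insert : %d\n", htab_insert( &a ) );   //  0 : success
      printf("second insert : %d\n", htab_insert( &b ) );   // -1 : duplicate key
      return 0;
  }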
trunk/kernel/libk/xhtab.h
r614 r635 2 2 * xhtab.h - Remote access embedded hash table definition. 3 3 * 4 * Author Alain Greiner (2016,2017,2018 ) 4 * Author Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 38 38 // The main goal is to speedup search by key in a large number of items of same type. 39 39 // For this purpose the set of all registered items is split in several subsets. 40 // Each subset is organised as an embedded double linked xlist s. 40 // Each subset is organised as an embedded double linked xlist. 41 41 // - an item is uniquely identified by a <key>, that is an item specific pointer, 42 42 // that can be a - for example - a char* defining the item "name". … … 64 64 65 65 /****************************************************************************************** 66 * This define the four item_type_specific function prototypes that must be defined 66 * Here are the four item_type_specific function prototypes that must be defined 67 67 * for each item type. 68 68 *****************************************************************************************/ … … 74 74 75 75 /****************************************************************************************** 76 * This define the supported item types. 76 * This defines the currently supported item types. 77 77 * - The XHTAB_DENTRY_TYPE is used to implement the set of directory entries for a 78 78 * directory inode : the "children" inode field is an embedded xhtab. -
trunk/kernel/mm/kcm.c
r619 r635 1 1 /* 2 * kcm.c - Per clusterKernel Cache Manager implementation.2 * kcm.c - Kernel Cache Manager implementation. 3 3 * 4 * Author Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018,2019) 4 * Author Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 38 37 39 38 39 ///////////////////////////////////////////////////////////////////////////////////// 40 // Local access functions 41 ///////////////////////////////////////////////////////////////////////////////////// 42 40 43 ////////////////////////////////////////////////////////////////////////////////////// 41 // This static function returns pointer on an allocated block from an active page. 42 // It returns NULL if no block available in selected page. 43 // It changes the page status if required. 44 // This static function must be called by a local thread. 45 // It returns a pointer on a block allocated from a non-full kcm_page. 46 // It makes a panic if no block is available in selected page. 47 // It changes the page status as required. 44 48 ////////////////////////////////////////////////////////////////////////////////////// 45 // @ kcm : pointer on kcm allocator. 46 // @ kcm_page : pointer on active kcm page to use. 47 ///////////////////////////////////////////////////////////////////////////////////// 48 static void * kcm_get_block( kcm_t * kcm, 49 kcm_page_t * kcm_page ) 50 { 51 52 #if DEBUG_KCM 53 thread_t * this = CURRENT_THREAD; 54 uint32_t cycle = (uint32_t)hal_get_cycles(); 49 // @ kcm : pointer on KCM allocator. 50 // @ kcm_page : pointer on a non-full kcm_page. 51 // @ return pointer on allocated block. 52 ///////////////////////////////////////////////////////////////////////////////////// 53 static void * __attribute__((noinline)) kcm_get_block( kcm_t * kcm, 54 kcm_page_t * kcm_page ) 55 { 56 // initialise variables 57 uint32_t size = 1 << kcm->order; 58 uint32_t max = kcm->max_blocks; 59 uint32_t count = kcm_page->count; 60 uint64_t status = kcm_page->status; 61 62 assert( (count < max) , "kcm_page should not be full" ); 63 64 uint32_t index = 1; 65 uint64_t mask = (uint64_t)0x2; 66 uint32_t found = 0; 67 68 // allocate first free block in kcm_page, update status, 69 // and count , compute index of allocated block in kcm_page 70 while( index <= max ) 71 { 72 if( (status & mask) == 0 ) // block non allocated 73 { 74 kcm_page->status = status | mask; 75 kcm_page->count = count + 1; 76 found = 1; 77 78 break; 79 } 80 81 index++; 82 mask <<= 1; 83 } 84 85 // change the page list if almost full 86 if( count == max-1 ) 87 { 88 list_unlink( &kcm_page->list); 89 kcm->active_pages_nr--; 90 91 list_add_first( &kcm->full_root , &kcm_page->list ); 92 kcm->full_pages_nr ++; 93 } 94 95 // compute return pointer 96 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 97 98 #if (DEBUG_KCM & 1) 99 thread_t * this = CURRENT_THREAD; 100 uint32_t cycle = (uint32_t)hal_get_cycles(); 55 101 if( DEBUG_KCM < cycle ) 56 printk("\n[%s] thread[%x,%x] enters for %s / page %x / count %d / active %d\n", 57 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type), 58 (intptr_t)kcm_page , kcm_page->count , kcm_page->active ); 59 #endif 60 61 assert( kcm_page->active , "kcm_page should be active" ); 62 63 // get first block available 64 int32_t index = bitmap_ffs( kcm_page->bitmap , kcm->blocks_nr ); 65 66 assert( (index != -1) , "kcm_page should not be full" ); 67 68 // allocate block 69 bitmap_clear( kcm_page->bitmap , index ); 70 71 // increase 
kcm_page count 72 kcm_page->count ++; 73 74 // change the kcm_page to busy if no more free block in page 75 if( kcm_page->count >= kcm->blocks_nr ) 76 { 77 kcm_page->active = 0; 78 list_unlink( &kcm_page->list); 79 kcm->active_pages_nr --; 80 81 list_add_first( &kcm->busy_root , &kcm_page->list); 82 kcm->busy_pages_nr ++; 83 kcm_page->busy = 1; 84 } 85 86 // compute return pointer 87 void * ptr = (void *)((intptr_t)kcm_page + CONFIG_KCM_SLOT_SIZE 88 + (index * kcm->block_size) ); 89 90 #if DEBUG_KCM 91 cycle = (uint32_t)hal_get_cycles(); 92 if( DEBUG_KCM < cycle ) 93 printk("\n[%s] thread[%x,%x] exit for %s / ptr %x / page %x / count %d\n", 94 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(kcm->type), 95 (intptr_t)ptr, (intptr_t)kcm_page, kcm_page->count ); 102 printk("\n[%s] thread[%x,%x] allocated block %x in page %x / size %d / count %d / cycle %d\n", 103 __FUNCTION__, this->process->pid, this->trdid, ptr, kcm_page, size, count + 1, cycle ); 96 104 #endif 97 105 98 106 return ptr; 99 } 100 101 ///////////////////////////////////////////////////////////////////////////////////// 102 // This static function releases a previously allocated block. 103 // It changes the kcm_page status if required. 104 ///////////////////////////////////////////////////////////////////////////////////// 105 // @ kcm : pointer on kcm allocator. 106 // @ kcm_page : pointer on kcm_page. 107 // @ ptr : pointer on block to be released. 108 ///////////////////////////////////////////////////////////////////////////////////// 109 static void kcm_put_block ( kcm_t * kcm, 110 kcm_page_t * kcm_page, 111 void * ptr ) 112 { 113 uint32_t index; 114 107 108 } // end kcm_get_block() 109 110 ///////////////////////////////////////////////////////////////////////////////////// 111 // This private static function must be called by a local thread. 112 // It releases a previously allocated block to the relevant kcm_page. 113 // It makes a panic if the released block is not allocated in this page. 114 // It changes the kcm_page status as required. 115 ///////////////////////////////////////////////////////////////////////////////////// 116 // @ kcm : pointer on kcm allocator. 117 // @ kcm_page : pointer on kcm_page. 118 // @ block_ptr : pointer on block to be released. 
119 ///////////////////////////////////////////////////////////////////////////////////// 120 static void __attribute__((noinline)) kcm_put_block ( kcm_t * kcm, 121 kcm_page_t * kcm_page, 122 void * block_ptr ) 123 { 124 // initialise variables 125 uint32_t max = kcm->max_blocks; 126 uint32_t size = 1 << kcm->order; 127 uint32_t count = kcm_page->count; 128 uint64_t status = kcm_page->status; 129 115 130 // compute block index from block pointer 116 index = ((uint8_t *)ptr - (uint8_t *)kcm_page - CONFIG_KCM_SLOT_SIZE) / kcm->block_size; 117 118 assert( !bitmap_state( kcm_page->bitmap , index ) , "page already freed" ); 119 120 assert( (kcm_page->count > 0) , "count already zero" ); 121 122 bitmap_set( kcm_page->bitmap , index ); 123 kcm_page->count --; 124 125 // change the page to active if it was busy 126 if( kcm_page->busy ) 127 { 128 kcm_page->busy = 0; 131 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size; 132 133 // compute mask in bit vector 134 uint64_t mask = ((uint64_t)0x1) << index; 135 136 assert( (status & mask) , "released block not allocated : status (%x,%x) / mask(%x,%x)", 137 GET_CXY(status), GET_PTR(status), GET_CXY(mask ), GET_PTR(mask ) ); 138 139 // update status & count in kcm_page 140 kcm_page->status = status & ~mask; 141 kcm_page->count = count - 1; 142 143 // change the page mode if page was full 144 if( count == max ) 145 { 129 146 list_unlink( &kcm_page->list ); 130 kcm-> busy_pages_nr --;147 kcm->full_pages_nr --; 131 148 132 149 list_add_last( &kcm->active_root, &kcm_page->list ); 133 150 kcm->active_pages_nr ++; 134 kcm_page->active = 1; 135 } 136 137 // change the kcm_page to free if last block in active page 138 if( (kcm_page->active) && (kcm_page->count == 0) ) 139 { 140 kcm_page->active = 0; 141 list_unlink( &kcm_page->list); 142 kcm->active_pages_nr --; 143 144 list_add_first( &kcm->free_root , &kcm_page->list); 145 kcm->free_pages_nr ++; 146 } 147 } 148 149 ///////////////////////////////////////////////////////////////////////////////////// 150 // This static function allocates one page from PPM. It initializes 151 // the kcm_page descriptor, and introduces the new kcm_page into freelist. 152 ///////////////////////////////////////////////////////////////////////////////////// 153 static error_t freelist_populate( kcm_t * kcm ) 154 { 155 page_t * page; 156 kcm_page_t * kcm_page; 157 kmem_req_t req; 158 159 // get one page from local PPM 160 req.type = KMEM_PAGE; 161 req.size = 0; 162 req.flags = AF_KERNEL; 163 page = kmem_alloc( &req ); 164 165 if( page == NULL ) 166 { 167 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 168 __FUNCTION__ , local_cxy ); 169 return ENOMEM; 170 } 171 172 // get page base address 173 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 174 kcm_page = (kcm_page_t *)GET_PTR( base_xp ); 175 176 // initialize KCM-page descriptor 177 bitmap_set_range( kcm_page->bitmap , 0 , kcm->blocks_nr ); 178 179 kcm_page->busy = 0; 180 kcm_page->active = 0; 181 kcm_page->count = 0; 182 kcm_page->kcm = kcm; 183 kcm_page->page = page; 184 185 // introduce new page in free-list 186 list_add_first( &kcm->free_root , &kcm_page->list ); 187 kcm->free_pages_nr ++; 188 189 return 0; 190 } 191 192 ///////////////////////////////////////////////////////////////////////////////////// 193 // This private function gets one KCM page from the KCM freelist. 194 // It populates the freelist if required. 
195 ///////////////////////////////////////////////////////////////////////////////////// 196 static kcm_page_t * freelist_get( kcm_t * kcm ) 197 { 198 error_t error; 199 kcm_page_t * kcm_page; 200 201 // get a new page from PPM if freelist empty 202 if( kcm->free_pages_nr == 0 ) 203 { 204 error = freelist_populate( kcm ); 205 if( error ) return NULL; 206 } 207 208 // get first KCM page from freelist and unlink it 209 kcm_page = LIST_FIRST( &kcm->free_root, kcm_page_t , list ); 210 list_unlink( &kcm_page->list ); 211 kcm->free_pages_nr --; 151 } 152 153 #if (DEBUG_KCM & 1) 154 thread_t * this = CURRENT_THREAD; 155 uint32_t cycle = (uint32_t)hal_get_cycles(); 156 if( DEBUG_KCM < cycle ) 157 printk("\n[%s] thread[%x,%x] released block %x in page %x / size %d / count %d / cycle %d\n", 158 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1, cycle ); 159 #endif 160 161 } // kcm_put_block() 162 163 ///////////////////////////////////////////////////////////////////////////////////// 164 // This private static function must be called by a local thread. 165 // It returns one non-full kcm_page with te following policy : 166 // - if the "active_list" is non empty, it returns the first "active" page, 167 // without modifying the KCM state. 168 // - if the "active_list" is empty, it allocates a new page fromm PPM, inserts 169 // this page in the active_list, and returns it. 170 ///////////////////////////////////////////////////////////////////////////////////// 171 // @ kcm : local pointer on local KCM allocator. 172 // @ return pointer on a non-full kcm page if success / returns NULL if no memory. 173 ///////////////////////////////////////////////////////////////////////////////////// 174 static kcm_page_t * __attribute__((noinline)) kcm_get_page( kcm_t * kcm ) 175 { 176 kcm_page_t * kcm_page; 177 178 uint32_t active_pages_nr = kcm->active_pages_nr; 179 180 if( active_pages_nr > 0 ) // return first active page 181 { 182 kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 183 } 184 else // allocate a new page from PPM 185 { 186 // get one 4 Kbytes page from local PPM 187 page_t * page = ppm_alloc_pages( 0 ); 188 189 if( page == NULL ) 190 { 191 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 192 __FUNCTION__ , local_cxy ); 193 194 return NULL; 195 } 196 197 // get page base address 198 xptr_t base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 199 200 // get local pointer on kcm_page 201 kcm_page = GET_PTR( base_xp ); 202 203 // initialize kcm_page descriptor 204 kcm_page->status = 0; 205 kcm_page->count = 0; 206 kcm_page->kcm = kcm; 207 kcm_page->page = page; 208 209 // introduce new page in KCM active_list 210 list_add_first( &kcm->active_root , &kcm_page->list ); 211 kcm->active_pages_nr ++; 212 } 212 213 213 214 return kcm_page; 214 } 215 216 } // end kcm_get_page() 215 217 216 218 ////////////////////////////// 217 219 void kcm_init( kcm_t * kcm, 218 uint32_t type ) 219 { 220 221 // the kcm_page descriptor must fit in the KCM slot 222 assert( (sizeof(kcm_page_t) <= CONFIG_KCM_SLOT_SIZE) , "KCM slot too small\n" ); 223 224 // the allocated object must fit in one single page 225 assert( (kmem_type_size(type) <= (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE)), 226 "allocated object requires more than one single page\n" ); 220 uint32_t order) 221 { 222 223 assert( ((order > 5) && (order < 12)) , "order must be in [6,11]" ); 227 224 228 225 // initialize lock 229 busylock_init( &kcm->lock , LOCK_KCM_STATE ); 230 231 // initialize KCM 
type 232 kcm->type = type; 226 remote_busylock_init( XPTR( local_cxy , &kcm->lock ) , LOCK_KCM_STATE ); 233 227 234 228 // initialize KCM page lists 235 kcm->free_pages_nr = 0; 236 kcm->busy_pages_nr = 0; 229 kcm->full_pages_nr = 0; 237 230 kcm->active_pages_nr = 0; 238 list_root_init( &kcm->free_root ); 239 list_root_init( &kcm->busy_root ); 231 list_root_init( &kcm->full_root ); 240 232 list_root_init( &kcm->active_root ); 241 233 242 // initialize block size 243 uint32_t block_size = ARROUND_UP( kmem_type_size( type ) , CONFIG_KCM_SLOT_SIZE ); 244 kcm->block_size = block_size; 245 246 // initialize number of blocks per page 247 uint32_t blocks_nr = (CONFIG_PPM_PAGE_SIZE - CONFIG_KCM_SLOT_SIZE) / block_size; 248 kcm->blocks_nr = blocks_nr; 249 234 // initialize order and max_blocks 235 kcm->order = order; 236 kcm->max_blocks = ( CONFIG_PPM_PAGE_SIZE >> order ) - 1; 237 250 238 #if DEBUG_KCM 251 239 thread_t * this = CURRENT_THREAD; 252 240 uint32_t cycle = (uint32_t)hal_get_cycles(); 253 241 if( DEBUG_KCM < cycle ) 254 printk("\n[%s] thread[%x,%x] initialised KCM %s : block_size %d / blocks_nr %d\n", 255 __FUNCTION__, this->process->pid, this->trdid, 256 kmem_type_str( kcm->type ), block_size, blocks_nr ); 257 #endif 258 259 } 242 printk("\n[%s] thread[%x,%x] initialised KCM / order %d / max_blocks %d\n", 243 __FUNCTION__, this->process->pid, this->trdid, order, kcm->max_blocks ); 244 #endif 245 246 } // end kcm_init() 260 247 261 248 /////////////////////////////// … … 263 250 { 264 251 kcm_page_t * kcm_page; 265 list_entry_t * iter; 252 253 // build extended pointer on KCM lock 254 xptr_t lock_xp = XPTR( local_cxy , &kcm->lock ); 266 255 267 256 // get KCM lock 268 busylock_acquire( &kcm->lock ); 269 270 // release all free pages 271 LIST_FOREACH( &kcm->free_root , iter ) 272 { 273 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list ); 274 list_unlink( iter ); 275 kcm->free_pages_nr --; 257 remote_busylock_acquire( lock_xp ); 258 259 // release all full pages 260 while( list_is_empty( &kcm->full_root ) == false ) 261 { 262 kcm_page = LIST_FIRST( &kcm->full_root , kcm_page_t , list ); 263 list_unlink( &kcm_page->list ); 276 264 ppm_free_pages( kcm_page->page ); 277 265 } 278 266 279 // release all active pages 280 LIST_FOREACH( &kcm->active_root , iter ) 281 { 282 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list ); 283 list_unlink( iter ); 284 kcm->free_pages_nr --; 267 // release all empty pages 268 while( list_is_empty( &kcm->active_root ) == false ) 269 { 270 kcm_page = LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 271 list_unlink( &kcm_page->list ); 285 272 ppm_free_pages( kcm_page->page ); 286 273 } 287 274 288 // release all busy pages289 LIST_FOREACH( &kcm->busy_root , iter )290 {291 kcm_page = (kcm_page_t *)LIST_ELEMENT( iter , kcm_page_t , list );292 list_unlink( iter );293 kcm->free_pages_nr --;294 ppm_free_pages( kcm_page->page );295 }296 297 275 // release KCM lock 298 busylock_release( &kcm->lock);276 remote_busylock_release( lock_xp ); 299 277 } 300 278 301 /////////////////////////////// 302 void * kcm_alloc( kcm_t * kcm ) 303 { 279 ////////////////////////////////// 280 void * kcm_alloc( uint32_t order ) 281 { 282 kcm_t * kcm_ptr; 304 283 kcm_page_t * kcm_page; 305 void * ptr = NULL; // pointer on block 284 void * block_ptr; 285 286 // min block size is 64 bytes 287 if( order < 6 ) order = 6; 288 289 assert( (order < 12) , "order = %d / must be less than 12" , order ); 290 291 // get local pointer on relevant KCM allocator 292 kcm_ptr = 
&LOCAL_CLUSTER->kcm[order - 6]; 293 294 // build extended pointer on local KCM lock 295 xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock ); 296 297 // get KCM lock 298 remote_busylock_acquire( lock_xp ); 299 300 // get a non-full kcm_page 301 kcm_page = kcm_get_page( kcm_ptr ); 302 303 if( kcm_page == NULL ) 304 { 305 remote_busylock_release( lock_xp ); 306 return NULL; 307 } 308 309 // get a block from selected active page 310 block_ptr = kcm_get_block( kcm_ptr , kcm_page ); 311 312 // release lock 313 remote_busylock_release( lock_xp ); 314 315 #if DEBUG_KCM 316 thread_t * this = CURRENT_THREAD; 317 uint32_t cycle = (uint32_t)hal_get_cycles(); 318 if( DEBUG_KCM < cycle ) 319 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm %x / status[%x,%x] / count %d\n", 320 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_ptr, 321 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 322 #endif 323 324 return block_ptr; 325 326 } // end kcm_alloc() 327 328 ///////////////////////////////// 329 void kcm_free( void * block_ptr ) 330 { 331 kcm_t * kcm_ptr; 332 kcm_page_t * kcm_page; 333 334 // check argument 335 assert( (block_ptr != NULL) , "block pointer cannot be NULL" ); 336 337 // get local pointer on KCM page 338 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 339 340 // get local pointer on KCM descriptor 341 kcm_ptr = kcm_page->kcm; 342 343 #if DEBUG_KCM 344 thread_t * this = CURRENT_THREAD; 345 uint32_t cycle = (uint32_t)hal_get_cycles(); 346 if( DEBUG_KCM < cycle ) 347 printk("\n[%s] thread[%x,%x] release block %x / order %d / kcm %x / status [%x,%x] / count %d\n", 348 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_ptr->order, kcm_ptr, 349 GET_CXY(kcm_page->status), GET_PTR(kcm_page->status), kcm_page->count ); 350 #endif 351 352 // build extended pointer on local KCM lock 353 xptr_t lock_xp = XPTR( local_cxy , &kcm_ptr->lock ); 306 354 307 355 // get lock 308 busylock_acquire( &kcm->lock ); 309 310 // get an active page 311 if( list_is_empty( &kcm->active_root ) ) // no active page => get one 312 { 313 // get a page from free list 314 kcm_page = freelist_get( kcm ); 315 316 if( kcm_page == NULL ) 317 { 318 busylock_release( &kcm->lock ); 319 return NULL; 320 } 321 322 // insert page in active list 323 list_add_first( &kcm->active_root , &kcm_page->list ); 324 kcm->active_pages_nr ++; 325 kcm_page->active = 1; 326 } 327 else // get first page from active list 328 { 329 // get page pointer from active list 330 kcm_page = (kcm_page_t *)LIST_FIRST( &kcm->active_root , kcm_page_t , list ); 356 remote_busylock_acquire( lock_xp ); 357 358 // release block 359 kcm_put_block( kcm_ptr , kcm_page , block_ptr ); 360 361 // release lock 362 remote_busylock_release( lock_xp ); 363 } 364 365 ///////////////////////////////////////////////////////////////////////////////////// 366 // Remote access functions 367 ///////////////////////////////////////////////////////////////////////////////////// 368 369 ///////////////////////////////////////////////////////////////////////////////////// 370 // This static function can be called by any thread running in any cluster. 371 // It returns a local pointer on a block allocated from an non-full kcm_page. 372 // It makes a panic if no block available in selected page. 373 // It changes the page status as required. 374 ///////////////////////////////////////////////////////////////////////////////////// 375 // @ kcm_cxy : remote KCM cluster identidfier. 
376 // @ kcm_ptr : local pointer on remote KCM allocator. 377 // @ kcm_page : pointer on active kcm page to use. 378 // @ return a local pointer on the allocated block. 379 ///////////////////////////////////////////////////////////////////////////////////// 380 static void * __attribute__((noinline)) kcm_remote_get_block( cxy_t kcm_cxy, 381 kcm_t * kcm_ptr, 382 kcm_page_t * kcm_page ) 383 { 384 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 385 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) ); 386 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 387 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 388 uint32_t size = 1 << order; 389 390 assert( (count < max) , "kcm_page should not be full" ); 391 392 uint32_t index = 1; 393 uint64_t mask = (uint64_t)0x2; 394 uint32_t found = 0; 395 396 // allocate first free block in kcm_page, update status, 397 // and count , compute index of allocated block in kcm_page 398 while( index <= max ) 399 { 400 if( (status & mask) == 0 ) // block non allocated 401 { 402 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status | mask ); 403 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->count ) , count + 1 ); 404 found = 1; 405 break; 406 } 407 408 index++; 409 mask <<= 1; 410 } 411 412 // change the page list if almost full 413 if( count == max-1 ) 414 { 415 list_remote_unlink( kcm_cxy , &kcm_page->list ); 416 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , -1 ); 417 418 list_remote_add_first( kcm_cxy , &kcm_ptr->full_root , &kcm_page->list ); 419 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , 1 ); 420 } 421 422 // compute return pointer 423 void * ptr = (void *)((intptr_t)kcm_page + (index * size) ); 424 425 #if DEBUG_KCM_REMOTE 426 thread_t * this = CURRENT_THREAD; 427 uint32_t cycle = (uint32_t)hal_get_cycles(); 428 if( DEBUG_KCM_REMOTE < cycle ) 429 printk("\n[%s] thread[%x,%x] get block %x in page %x / cluster %x / size %x / count %d\n", 430 __FUNCTION__, this->process->pid, this->trdid, 431 ptr, kcm_page, kcm_cxy, size, count + 1 ); 432 #endif 433 434 return ptr; 435 436 } // end kcm_remote_get_block() 437 438 ///////////////////////////////////////////////////////////////////////////////////// 439 // This private static function can be called by any thread running in any cluster. 440 // It releases a previously allocated block to the relevant kcm_page. 441 // It changes the kcm_page status as required. 442 ///////////////////////////////////////////////////////////////////////////////////// 443 // @ kcm_cxy : remote KCM cluster identifier 444 // @ kcm_ptr : local pointer on remote KCM. 445 // @ kcm_page : local pointer on kcm_page. 446 // @ block_ptr : pointer on block to be released. 
447 ///////////////////////////////////////////////////////////////////////////////////// 448 static void __attribute__((noinline)) kcm_remote_put_block ( cxy_t kcm_cxy, 449 kcm_t * kcm_ptr, 450 kcm_page_t * kcm_page, 451 void * block_ptr ) 452 { 453 uint32_t max = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->max_blocks ) ); 454 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 455 uint32_t count = hal_remote_l32( XPTR( kcm_cxy , &kcm_page->count ) ); 456 uint64_t status = hal_remote_l64( XPTR( kcm_cxy , &kcm_page->status ) ); 457 uint32_t size = 1 << order; 458 459 // compute block index from block pointer 460 uint32_t index = ((intptr_t)block_ptr - (intptr_t)kcm_page) / size; 461 462 // compute mask in bit vector 463 uint64_t mask = 1 << index; 464 465 assert( (status & mask) , "released page not allocated" ); 466 467 // update status & count in kcm_page 468 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , status & ~mask ); 469 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , count - 1 ); 470 471 // change the page list if page was full 472 if( count == max ) 473 { 474 list_remote_unlink( kcm_cxy , &kcm_page->list ); 475 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) , -1 ); 476 477 list_remote_add_last( kcm_cxy , &kcm_ptr->active_root, &kcm_page->list ); 478 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 ); 479 } 480 481 #if (DEBUG_KCM_REMOTE & 1) 482 thread_t * this = CURRENT_THREAD; 483 uint32_t cycle = (uint32_t)hal_get_cycles(); 484 if( DEBUG_KCM_REMOTE < cycle ) 485 printk("\n[%s] thread[%x,%x] released block %x in page %x / cluster %x / size %x / count %d\n", 486 __FUNCTION__, this->process->pid, this->trdid, block_ptr, kcm_page, size, count - 1 ) 487 #endif 488 489 } // end kcm_remote_put_block() 490 491 ///////////////////////////////////////////////////////////////////////////////////// 492 // This private static function can be called by any thread running in any cluster. 493 // It gets one non-full KCM page from the remote KCM. 494 // It allocates a page from remote PPM to populate the freelist, and initialises 495 // the kcm_page descriptor when required. 
496 ///////////////////////////////////////////////////////////////////////////////////// 497 static kcm_page_t * __attribute__((noinline)) kcm_remote_get_page( cxy_t kcm_cxy, 498 kcm_t * kcm_ptr ) 499 { 500 kcm_page_t * kcm_page; // local pointer on remote KCM page 501 502 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); 503 504 if( active_pages_nr > 0 ) // return first active page 505 { 506 kcm_page = LIST_REMOTE_FIRST( kcm_cxy , &kcm_ptr->active_root , kcm_page_t , list ); 507 } 508 else // allocate a new page from PPM 509 { 510 // get one 4 Kbytes page from remote PPM 511 page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 ); 512 513 if( page == NULL ) 514 { 515 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", 516 __FUNCTION__ , kcm_cxy ); 517 518 return NULL; 519 } 520 521 // get remote page base address 522 xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) ); 523 524 // get local pointer on kcm_page 525 kcm_page = GET_PTR( base_xp ); 526 527 // initialize kcm_page descriptor 528 hal_remote_s32( XPTR( kcm_cxy , &kcm_page->count ) , 0 ); 529 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 ); 530 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm ) , kcm_ptr ); 531 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page ) , page ); 532 533 // introduce new page in remote KCM active_list 534 list_remote_add_first( kcm_cxy , &kcm_ptr->active_root , &kcm_page->list ); 535 hal_remote_atomic_add( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) , 1 ); 536 } 537 538 return kcm_page; 539 540 } // end kcm_remote_get_page() 541 542 ///////////////////////////////////////// 543 void * kcm_remote_alloc( cxy_t kcm_cxy, 544 uint32_t order ) 545 { 546 kcm_t * kcm_ptr; 547 kcm_page_t * kcm_page; 548 void * block_ptr; 549 550 if( order < 6 ) order = 6; 551 552 assert( (order < 12) , "order = %d / must be less than 12" , order ); 553 554 // get local pointer on relevant KCM allocator 555 kcm_ptr = &LOCAL_CLUSTER->kcm[order - 6]; 556 557 // build extended pointer on remote KCM lock 558 xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock ); 559 560 // get lock 561 remote_busylock_acquire( lock_xp ); 562 563 // get a non-full kcm_page 564 kcm_page = kcm_remote_get_page( kcm_cxy , kcm_ptr ); 565 566 if( kcm_page == NULL ) 567 { 568 remote_busylock_release( lock_xp ); 569 return NULL; 331 570 } 332 571 333 572 // get a block from selected active page 334 // cannot fail, as an active page cannot be full... 
335 ptr = kcm_get_block( kcm , kcm_page ); 573 block_ptr = kcm_remote_get_block( kcm_cxy , kcm_ptr , kcm_page ); 336 574 337 575 // release lock 338 busylock_release( &kcm->lock ); 339 340 return ptr; 576 remote_busylock_release( lock_xp ); 577 578 #if DEBUG_KCM_REMOTE 579 thread_t * this = CURRENT_THREAD; 580 uint32_t cycle = (uint32_t)hal_get_cycles(); 581 if( DEBUG_KCM_REMOTE < cycle ) 582 printk("\n[%s] thread[%x,%x] allocated block %x / order %d / kcm[%x,%x]\n", 583 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr ); 584 #endif 585 586 return block_ptr; 587 588 } // end kcm_remote_alloc() 589 590 ///////////////////////////////////// 591 void kcm_remote_free( cxy_t kcm_cxy, 592 void * block_ptr ) 593 { 594 kcm_t * kcm_ptr; 595 kcm_page_t * kcm_page; 596 597 // check argument 598 assert( (block_ptr != NULL) , "block pointer cannot be NULL" ); 599 600 // get local pointer on remote KCM page 601 kcm_page = (kcm_page_t *)((intptr_t)block_ptr & ~CONFIG_PPM_PAGE_MASK); 602 603 // get local pointer on remote KCM 604 kcm_ptr = hal_remote_lpt( XPTR( kcm_cxy , &kcm_page->kcm ) ); 605 606 // build extended pointer on remote KCM lock 607 xptr_t lock_xp = XPTR( kcm_cxy , &kcm_ptr->lock ); 608 609 // get lock 610 remote_busylock_acquire( lock_xp ); 611 612 // release block 613 kcm_remote_put_block( kcm_cxy , kcm_ptr , kcm_page , block_ptr ); 614 615 // release lock 616 remote_busylock_release( lock_xp ); 617 618 #if DEBUG_KCM_REMOTE 619 thread_t * this = CURRENT_THREAD; 620 uint32_t cycle = (uint32_t)hal_get_cycles(); 621 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order ) ); 622 if( DEBUG_KCM_REMOTE < cycle ) 623 printk("\n[%s] thread[%x,%x] released block %x / order %d / kcm[%x,%x]\n", 624 __FUNCTION__, this->process->pid, this->trdid, block_ptr, order, kcm_cxy, kcm_ptr ); 625 #endif 626 627 } // end kcm_remote_free 628 629 ///////////////////////////////////////// 630 void kcm_remote_display( cxy_t kcm_cxy, 631 kcm_t * kcm_ptr ) 632 { 633 uint32_t order = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->order) ); 634 uint32_t full_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->full_pages_nr ) ); 635 uint32_t active_pages_nr = hal_remote_l32( XPTR( kcm_cxy , &kcm_ptr->active_pages_nr ) ); 636 637 printk("*** KCM / cxy %x / order %d / full_pages %d / empty_pages %d / active_pages %d\n", 638 kcm_cxy, order, full_pages_nr, active_pages_nr ); 341 639 } 342 343 ///////////////////////////344 void kcm_free( void * ptr )345 {346 kcm_page_t * kcm_page;347 kcm_t * kcm;348 349 // check argument350 assert( (ptr != NULL) , "pointer cannot be NULL" );351 352 kcm_page = (kcm_page_t *)((intptr_t)ptr & ~CONFIG_PPM_PAGE_MASK);353 kcm = kcm_page->kcm;354 355 // get lock356 busylock_acquire( &kcm->lock );357 358 // release block359 kcm_put_block( kcm , kcm_page , ptr );360 361 // release lock362 busylock_release( &kcm->lock );363 }364 365 ////////////////////////////366 void kcm_print (kcm_t * kcm)367 {368 printk("*** KCM type = %s / free_pages = %d / busy_pages = %d / active_pages = %d\n",369 kmem_type_str( kcm->type ) ,370 kcm->free_pages_nr ,371 kcm->busy_pages_nr ,372 kcm->active_pages_nr );373 } -
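To make the new slot-management scheme in kcm.c easier to follow, here is a minimal standalone sketch (not part of the changeset) of its two core operations: scanning the 64-bit status word for the first free slot, and recovering the owning kcm_page descriptor from a block address. The demo_* names and the DEMO_PAGE_* constants are illustrative stand-ins for CONFIG_PPM_PAGE_SIZE / CONFIG_PPM_PAGE_MASK.

#include <stdint.h>

#define DEMO_PAGE_SIZE  4096                      /* stand-in for CONFIG_PPM_PAGE_SIZE */
#define DEMO_PAGE_MASK  (DEMO_PAGE_SIZE - 1)      /* stand-in for CONFIG_PPM_PAGE_MASK */

/* return the index of the first free slot in a kcm_page, or -1 if the page is full. */
/* bit i of <status> is set when slot i is allocated; slot 0 always holds the        */
/* kcm_page descriptor itself, so the scan starts at index 1, exactly as in          */
/* kcm_get_block() / kcm_remote_get_block().                                         */
static int demo_first_free_slot( uint64_t status , uint32_t max_blocks )
{
    uint32_t index = 1;
    uint64_t mask  = (uint64_t)0x2;

    while( index <= max_blocks )
    {
        if( (status & mask) == 0 ) return (int)index;
        index++;
        mask <<= 1;
    }
    return -1;
}

/* recover the owning kcm_page descriptor from any block address, as kcm_free() does, */
/* by clearing the offset bits inside the 4 Kbytes page                               */
static void * demo_block_to_kcm_page( void * block_ptr )
{
    return (void *)( (uintptr_t)block_ptr & ~(uintptr_t)DEMO_PAGE_MASK );
}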
trunk/kernel/mm/kcm.h
r619 r635
1 1 /*
2 * kcm.h - Per-cluster Kernel Cache Manager definition.
2 * kcm.h - Kernel Cache Manager definition.
3 3 *
4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012)
5 * Alain Greiner (2016,2017,2018)
4 * Authors Alain Greiner (2016,2017,2018,2019)
6 5 *
7 6 * Copyright (c) UPMC Sorbonne Universites
…
33 32 #include <kmem.h>
34 33
34
35 #define KCM_PAGE_FULL 0
36 #define KCM_PAGE_EMPTY 1
37 #define KCM_PAGE_ACTIVE 2
38
35 39 /****************************************************************************************
36 40 * This structure defines a generic Kernel Cache Manager, that is a block allocator,
37 * for fixed size objects. It exists a specific KCM allocator for each object type.
38 * The actual allocated block size is the smallest multiple of the KCM slot, that
39 * contain one single object. The KCM slot is 64 bytes, as it must be large enough
40 * to store the kcm_page descriptor, defined below.
41 * The various KCM allocators themselves are not statically allocated in the cluster
42 * manager, but are dynamically allocated when required, using the embedded KCM
43 * allocator defined in the cluster manager, to allocate the other ones...
41 * for fixed size objects. Each cluster contains one specific KCM allocator for each
42 * of the following block sizes: 64, 128, 256, 512, 1024, 2048 bytes.
43 * These six KCM allocators are initialized by the cluster_init() function.
44 *
45 * Each KCM cache is implemented as a set of 4 Kbytes pages. A kcm_page is split into
46 * slots, where each slot can contain one block. In each kcm_page, the first slot (that
47 * cannot be smaller than 64 bytes) contains the kcm page descriptor, defined below.
48 *
49 * To allow any thread running in any cluster to directly access the KCM of any cluster,
50 * ALMOS-MKH defines two sets of access functions, for local or remote access.
44 51 ***************************************************************************************/
45 52
46 53 typedef struct kcm_s
47 54 {
48 busylock_t lock; /*! protect KCM ammocator */
49 uint32_t block_size; /*! rounded block size (bytes) */
50 uint32_t blocks_nr; /*! max number of blocks per page */
55 remote_busylock_t lock; /*! protect KCM allocator */
51 56
57 list_entry_t full_root; /*! root of full pages list */
52 58 list_entry_t active_root; /*! root of active pages list */
53 list_entry_t busy_root; /*! root of busy pages list */
54 list_entry_t free_root; /*! root of free pages list */
55 59
56 uint32_t free_pages_nr; /*! number of free pages */
57 uint32_t busy_pages_nr; /*! number of busy pages */
60 uint32_t full_pages_nr; /*! number of full pages */
58 61 uint32_t active_pages_nr; /*! number of active pages */
59 62
60 uint32_t type; /*! KCM type */
63 uint32_t order; /*! ln( block_size ) */
64 uint32_t max_blocks; /*! max number of blocks per page */
61 65 }
62 66 kcm_t;
…
65 69 /****************************************************************************************
66 70 * This structure defines a KCM-page descriptor.
67 * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) blocks.
68 * This kcm page descriptor is stored in the first slot of the page.
71 * A KCM-page contains at most (CONFIG_PPM_PAGE_SIZE / CONFIG_KCM_SLOT_SIZE) slots,
72 * and each slot contains one block. The kcm page descriptor is stored in the first slot.
73 * The current allocation status is defined by the 64 bits "status" bit vector: each
74 * non zero bit defines an allocated block / "count" is the number of allocated blocks.
75 * Each kcm_page is registered in one of the two following page_list: 76 * - full : when count == max 77 * - active : count < max 69 78 ***************************************************************************************/ 70 79 71 80 typedef struct kcm_page_s 72 81 { 73 uint32_t bitmap[2]; /*! at most 64 blocks in a single page */ 74 list_entry_t list; /*! [active / busy / free] list member */ 75 kcm_t * kcm; /*! pointer on kcm allocator */ 76 page_t * page; /*! pointer on the physical page descriptor */ 77 uint32_t count; /*! number of allocated blocks */ 78 uint32_t busy; /*! page busy if non zero */ 79 uint32_t active; /*! page active if non zero */ 82 uint64_t status; /*! bit vector: non-zero == allocated */ 83 uint32_t count; /*! number of allocated blocks in page */ 84 list_entry_t list; /*! [active / busy / free] list member */ 85 kcm_t * kcm; /*! pointer on kcm allocator */ 86 page_t * page; /*! pointer on the physical page descriptor */ 80 87 } 81 88 kcm_page_t; 82 89 83 90 /**************************************************************************************** 84 * This function initializes a generic Kernel Cache Manager. 91 * This function must be called by a local thread. 92 * It initializes a Kernel Cache Manager, depending on block size. 85 93 **************************************************************************************** 86 94 * @ kcm : pointer on KCM manager to initialize. 87 * @ type : KCM allocator type.95 * @ order : ln(block_size). 88 96 ***************************************************************************************/ 89 97 void kcm_init( kcm_t * kcm, 90 uint32_t type);98 uint32_t order ); 91 99 92 100 /**************************************************************************************** 93 * This function releases all memory allocated to a generic Kernel Cache Manager. 101 * This function must be called by a local thread. 102 * It releases all memory allocated to a Kernel Cache Manager. 94 103 **************************************************************************************** 95 104 * @ kcm : pointer on KCM manager to destroy. … … 98 107 99 108 /**************************************************************************************** 100 * This function allocates one single object from a Kernel Cache Manager101 * The object size must be smaller than one page size.109 * This function must be called by a local thread. 110 * It allocates one block from the local Kernel Cache Manager. 102 111 **************************************************************************************** 103 * @ kcm : pointer on the selected KCM allocator112 * @ order : ln( block-size ) == KCM allocator identifier. 104 113 * @ return pointer on allocated block if success / return NULL if failure 105 114 ***************************************************************************************/ 106 void * kcm_alloc( kcm_t * kcm);115 void * kcm_alloc( uint32_t order ); 107 116 108 117 /**************************************************************************************** 109 * This function releases a previouly allocated block containing one object. 118 * This function must be called by a local thread. 119 * It releases a previouly allocated block to the local Kernel Cache Manager. 110 120 **************************************************************************************** 111 * @ ptr : local pointer on the allocated buffer.121 * @ block_ptr : local pointer on the released block. 
112 122 ***************************************************************************************/
113 void kcm_free( void *ptr );
123 void kcm_free( void * block_ptr );
114 124
115 125 /****************************************************************************************
116 * This function prints KCM allocator state (for debug only).
126 * This function can be called by any thread running in any cluster.
127 * It allocates one fixed size block from a remote Kernel Cache Manager.
117 128 ****************************************************************************************
118 * @ kcm : local pointer on the selected KCM allocator.
129 * @ kcm_cxy : remote KCM cluster identifier.
130 * @ order : ln( block-size ) == KCM allocator identifier.
131 * @ return a local pointer on allocated block if success / return NULL if failure
119 132 ***************************************************************************************/
120 void kcm_print( kcm_t * kcm );
133 void * kcm_remote_alloc( cxy_t kcm_cxy,
134 uint32_t order );
135
136 /****************************************************************************************
137 * This function can be called by any thread running in any cluster.
138 * It releases a previously allocated block to a remote Kernel Cache Manager.
139 ****************************************************************************************
140 * @ kcm_cxy : remote KCM cluster identifier.
141 * @ block_ptr : local pointer on the released buffer in remote cluster.
142 ***************************************************************************************/
143 void kcm_remote_free( cxy_t kcm_cxy,
144 void * block_ptr );
145
146 /****************************************************************************************
147 * This debug function can be called by any thread running in any cluster.
148 * It displays on TXT0 the current state of a - possibly remote - KCM allocator.
149 ****************************************************************************************
150 * @ kcm_cxy : remote KCM cluster identifier.
151 * @ kcm_ptr : local pointer on remote KCM.
152 ***************************************************************************************/
153 void kcm_remote_display( cxy_t kcm_cxy,
154 kcm_t * kcm_ptr );
121 155
122 156 #endif /* _KCM_H_ */
-
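The max_blocks formula used by kcm_init() above is easy to check with a small user-space program; it assumes CONFIG_PPM_PAGE_SIZE is 4 Kbytes, as the comments in this changeset indicate.

#include <stdio.h>
#include <stdint.h>

#define DEMO_PPM_PAGE_SIZE 4096        /* assumed value of CONFIG_PPM_PAGE_SIZE */

int main( void )
{
    uint32_t order;

    /* same formula as kcm_init() : one slot is lost to the embedded kcm_page descriptor */
    for( order = 6 ; order <= 11 ; order++ )
    {
        uint32_t block_size = 1 << order;
        uint32_t max_blocks = ( DEMO_PPM_PAGE_SIZE >> order ) - 1;

        printf( "order %2u : block size %4u bytes : %2u usable blocks per page\n",
                order , block_size , max_blocks );
    }
    return 0;
}

With a 4 Kbytes page this gives 63 usable blocks of 64 bytes down to a single block of 2048 bytes; requests of a full page (order 12) or more are served by the PPM, which is why kcm_alloc() asserts order < 12.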
trunk/kernel/mm/khm.h
r619 r635 32 32 /******************************************************************************************* 33 33 * This structure defines a Kernel Heap Manager (KHM) in a given cluster. 34 * It is used to allocate memory objects, that too large, or not enough replicated34 * It is used to allocate memory objects, that are too large, or not enough replicated 35 35 * to use a dedicated KCM allocator. 36 36 ******************************************************************************************/ … … 54 54 { 55 55 uint32_t busy; /*! free block if zero */ 56 uint32_t size; /*! block size 56 uint32_t size; /*! block size */ 57 57 } 58 58 khm_block_t; -
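khm.c itself is not touched by this changeset, so the actual allocation policy is not shown here; the following is only a hedged sketch of how a heap of such khm_block_t headers could be scanned first-fit, using hypothetical demo_* names.

#include <stdint.h>
#include <stddef.h>

/* mirror of the khm_block_t header defined above */
typedef struct demo_khm_block_s
{
    uint32_t busy;        /* free block if zero  */
    uint32_t size;        /* block size in bytes */
}
demo_khm_block_t;

/* first-fit scan over the contiguous headers of a heap [base , base + heap_size) */
static demo_khm_block_t * demo_khm_find( void * base , uint32_t heap_size , uint32_t requested )
{
    uint8_t * ptr = (uint8_t *)base;
    uint8_t * end = ptr + heap_size;

    while( ptr < end )
    {
        demo_khm_block_t * block = (demo_khm_block_t *)ptr;

        if( (block->busy == 0) && (block->size >= requested) ) return block;

        ptr += block->size;            /* jump to the next block header */
    }
    return NULL;                       /* no free block is large enough */
}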
trunk/kernel/mm/kmem.c
r619 r635 2 2 * kmem.c - kernel memory allocator implementation. 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018) 4 * Authors Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 27 26 #include <hal_special.h> 28 27 #include <printk.h> 29 #include < busylock.h>28 #include <cluster.h> 30 29 #include <memcpy.h> 31 30 #include <khm.h> 32 31 #include <ppm.h> 32 #include <kcm.h> 33 33 #include <page.h> 34 #include <cluster.h>35 #include <thread.h>36 #include <process.h>37 #include <chdev.h>38 #include <mapper.h>39 #include <vfs.h>40 #include <fatfs.h>41 #include <ramfs.h>42 #include <user_dir.h>43 #include <remote_sem.h>44 #include <remote_barrier.h>45 #include <remote_mutex.h>46 #include <remote_condvar.h>47 #include <mapper.h>48 #include <grdxt.h>49 #include <vseg.h>50 34 #include <kmem.h> 51 52 /////////////////////////////////53 void kmem_print_kcm_table( void )54 {55 uint32_t index;56 kcm_t * kcm;57 cluster_t * cluster = LOCAL_CLUSTER;58 59 printk("\n *** KCM Pointers Table ***\n");60 61 for( index = 0 ; index < KMEM_TYPES_NR ; index++ )62 {63 kcm = cluster->kcm_tbl[index];64 if( kcm != NULL )65 {66 if( index == kcm->type )67 {68 printk(" - KCM[%s] (at address %x) is OK\n",69 kmem_type_str( index ) , (intptr_t)kcm );70 }71 else72 {73 printk(" - KCM[%s] (at address %x) is KO : has type %s\n",74 kmem_type_str( index ) , (intptr_t)kcm , kmem_type_str( kcm->type ) );75 }76 }77 }78 }79 80 /////////////////////////////////////////81 uint32_t kmem_type_size( uint32_t type )82 {83 if ( type == KMEM_PAGE ) return CONFIG_PPM_PAGE_SIZE;84 else if( type == KMEM_GENERIC ) return 0;85 else if( type == KMEM_KCM ) return sizeof( kcm_t );86 else if( type == KMEM_VSEG ) return sizeof( vseg_t );87 else if( type == KMEM_DEVICE ) return sizeof( chdev_t );88 else if( type == KMEM_MAPPER ) return sizeof( mapper_t );89 else if( type == KMEM_PROCESS ) return sizeof( process_t );90 else if( type == KMEM_CPU_CTX ) return CONFIG_CPU_CTX_SIZE;91 else if( type == KMEM_FPU_CTX ) return CONFIG_FPU_CTX_SIZE;92 else if( type == KMEM_GEN_BARRIER ) return sizeof( generic_barrier_t );93 94 else if( type == KMEM_SMP_BARRIER ) return sizeof( simple_barrier_t );95 else if( type == KMEM_DEVFS_CTX ) return sizeof( fatfs_ctx_t );96 else if( type == KMEM_FATFS_CTX ) return sizeof( fatfs_ctx_t );97 else if( type == KMEM_VFS_CTX ) return sizeof( vfs_ctx_t );98 else if( type == KMEM_VFS_INODE ) return sizeof( vfs_inode_t );99 else if( type == KMEM_VFS_DENTRY ) return sizeof( vfs_dentry_t );100 else if( type == KMEM_VFS_FILE ) return sizeof( vfs_file_t );101 else if( type == KMEM_SEM ) return sizeof( remote_sem_t );102 else if( type == KMEM_CONDVAR ) return sizeof( remote_condvar_t );103 else if( type == KMEM_MUTEX ) return sizeof( remote_mutex_t );104 105 else if( type == KMEM_DIR ) return sizeof( user_dir_t );106 else if( type == KMEM_512_BYTES ) return 512;107 108 else return 0;109 }110 111 /////////////////////////////////////112 char * kmem_type_str( uint32_t type )113 {114 if ( type == KMEM_PAGE ) return "KMEM_PAGE";115 else if( type == KMEM_GENERIC ) return "KMEM_GENERIC";116 else if( type == KMEM_KCM ) return "KMEM_KCM";117 else if( type == KMEM_VSEG ) return "KMEM_VSEG";118 else if( type == KMEM_DEVICE ) return "KMEM_DEVICE";119 else if( type == KMEM_MAPPER ) return "KMEM_MAPPER";120 else if( type == KMEM_PROCESS ) return "KMEM_PROCESS";121 else if( type == KMEM_CPU_CTX ) return "KMEM_CPU_CTX";122 else if( type == KMEM_FPU_CTX ) return 
"KMEM_FPU_CTX";123 else if( type == KMEM_GEN_BARRIER ) return "KMEM_GEN_BARRIER";124 125 else if( type == KMEM_SMP_BARRIER ) return "KMEM_SMP_BARRIER";126 else if( type == KMEM_DEVFS_CTX ) return "KMEM_DEVFS_CTX";127 else if( type == KMEM_FATFS_CTX ) return "KMEM_FATFS_CTX";128 else if( type == KMEM_VFS_CTX ) return "KMEM_VFS_CTX";129 else if( type == KMEM_VFS_INODE ) return "KMEM_VFS_INODE";130 else if( type == KMEM_VFS_DENTRY ) return "KMEM_VFS_DENTRY";131 else if( type == KMEM_VFS_FILE ) return "KMEM_VFS_FILE";132 else if( type == KMEM_SEM ) return "KMEM_SEM";133 else if( type == KMEM_CONDVAR ) return "KMEM_CONDVAR";134 else if( type == KMEM_MUTEX ) return "KMEM_MUTEX";135 136 else if( type == KMEM_DIR ) return "KMEM_DIR";137 else if( type == KMEM_512_BYTES ) return "KMEM_512_BYTES";138 139 else return "undefined";140 }141 142 /////////////////////////////////////////////////////////////////////////////////////////////143 // This static function dynamically allocates and initializes a specific KCM allocator.144 // It uses the KCM allocator embedded in cluster manager, initialized by cluster_init().145 /////////////////////////////////////////////////////////////////////////////////////////////146 static error_t kmem_create_kcm( uint32_t type )147 {148 kcm_t * kcm;149 150 assert( ((type > 1) && (type < KMEM_TYPES_NR) ) , "illegal KCM type" );151 152 #if DEBUG_KMEM153 thread_t * this = CURRENT_THREAD;154 uint32_t cycle = (uint32_t)hal_get_cycles();155 if( DEBUG_KMEM < cycle )156 printk("\n[%s] thread[%x,%x] enter / KCM type %s missing in cluster %x / cycle %d\n",157 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str( type ), local_cxy, cycle );158 #endif159 160 cluster_t * cluster = LOCAL_CLUSTER;161 162 // allocate memory for the requested KCM allocator163 // from the KCM allocator embedded in cluster descriptor164 kcm = kcm_alloc( &cluster->kcm );165 166 if( kcm == NULL )167 {168 printk("\n[ERROR] in %s : failed to create KCM type %d in cluster %x\n",169 __FUNCTION__ , type , local_cxy );170 return ENOMEM;171 }172 173 // initialize the new KCM allocator174 kcm_init( kcm , type );175 176 // register it in the KCM pointers Table177 cluster->kcm_tbl[type] = kcm;178 179 hal_fence();180 181 #if DEBUG_KMEM182 cycle = (uint32_t)hal_get_cycles();183 if( DEBUG_KMEM < cycle )184 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",185 __FUNCTION__, this->process->pid, this->trdid, cycle );186 #endif187 188 return 0;189 }190 35 191 36 ///////////////////////////////////// 192 37 void * kmem_alloc( kmem_req_t * req ) 193 38 { 194 cluster_t * cluster = LOCAL_CLUSTER; 195 196 uint32_t type; 197 uint32_t flags; 198 uint32_t size; // ln( pages ) if PPM / bytes if KHM / unused if KCM 199 void * ptr; // memory buffer if KHM or KCM / page descriptor if PPM 39 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 40 uint32_t flags; // AF_NONE / AF_ZERO / AF_KERNEL 41 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 200 42 201 43 type = req->type; 202 size = req->size;44 order = req->order; 203 45 flags = req->flags; 204 46 205 assert( (type < KMEM_TYPES_NR) , "illegal KMEM request type" ); 47 ////////////////////////////////// PPM 48 if( type == KMEM_PPM ) 49 { 50 // allocate the number of requested pages 51 page_t * page_ptr = (void *)ppm_alloc_pages( order ); 52 53 if( page_ptr == NULL ) 54 { 55 printk("\n[ERROR] in %s : PPM failed / order %d / cluster %x\n", 56 __FUNCTION__ , order , local_cxy ); 57 return NULL; 58 } 59 60 xptr_t page_xp = XPTR( local_cxy , page_ptr ); 61 62 // reset page 
if requested 63 if( flags & AF_ZERO ) page_zero( page_ptr ); 64 65 // get pointer on buffer from the page descriptor 66 void * ptr = GET_PTR( ppm_page2base( page_xp ) ); 206 67 207 68 #if DEBUG_KMEM 208 thread_t * this = CURRENT_THREAD;209 uint32_t cycle = (uint32_t)hal_get_cycles();69 thread_t * this = CURRENT_THREAD; 70 uint32_t cycle = (uint32_t)hal_get_cycles(); 210 71 if( DEBUG_KMEM < cycle ) 211 printk("\n[%s] thread [%x,%x] enter / %s / size %d / cluster %x / cycle %d\n", 72 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 73 __FUNCTION__, this->process->pid, this->trdid, 74 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), local_cxy, cycle ); 75 #endif 76 return ptr; 77 } 78 ///////////////////////////////////// KCM 79 else if( type == KMEM_KCM ) 80 { 81 // allocate memory from KCM 82 void * ptr = kcm_alloc( order ); 83 84 if( ptr == NULL ) 85 { 86 printk("\n[ERROR] in %s : KCM failed / order %d / cluster %x\n", 87 __FUNCTION__ , order , local_cxy ); 88 return NULL; 89 } 90 91 // reset memory if requested 92 if( flags & AF_ZERO ) memset( ptr , 0 , 1<<order ); 93 94 #if DEBUG_KMEM 95 thread_t * this = CURRENT_THREAD; 96 uint32_t cycle = (uint32_t)hal_get_cycles(); 97 if( DEBUG_KMEM < cycle ) 98 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n", 99 __FUNCTION__, this->process->pid, this->trdid, 100 1<<order, ptr, local_cxy, cycle ); 101 #endif 102 return ptr; 103 } 104 //////////////////////////////////// KHM 105 else if( type == KMEM_KHM ) 106 { 107 // allocate memory from KHM 108 void * ptr = khm_alloc( &LOCAL_CLUSTER->khm , order ); 109 110 if( ptr == NULL ) 111 { 112 printk("\n[ERROR] in %s : KHM failed / order %d / cluster %x\n", 113 __FUNCTION__ , order , local_cxy ); 114 return NULL; 115 } 116 117 // reset memory if requested 118 if( flags & AF_ZERO ) memset( ptr , 0 , order ); 119 120 #if DEBUG_KMEM 121 thread_t * this = CURRENT_THREAD; 122 uint32_t cycle = (uint32_t)hal_get_cycles(); 123 if( DEBUG_KMEM < cycle ) 124 printk("\n[%s] thread[%x,%x] from KHM / %d bytes / base %x / cxy %x / cycle %d\n", 212 125 __FUNCTION__, this->process->pid, this->trdid, 213 kmem_type_str( type ), size, local_cxy, cycle ); 214 #endif 215 216 // analyse request type 217 if( type == KMEM_PAGE ) // PPM allocator 218 { 219 // allocate the number of requested pages 220 ptr = (void *)ppm_alloc_pages( size ); 221 if( ptr == NULL ) 222 { 223 printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 224 __FUNCTION__ , type , size , local_cxy ); 225 return NULL; 226 } 227 228 // reset page if requested 229 if( flags & AF_ZERO ) page_zero( (page_t *)ptr ); 230 231 #if DEBUG_KMEM 232 cycle = (uint32_t)hal_get_cycles(); 233 if( DEBUG_KMEM < cycle ) 234 printk("\n[%s] thread[%x,%x] exit / %d page(s) allocated / ppn %x / cycle %d\n", 235 __FUNCTION__, this->process->pid, this->trdid, 236 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle ); 237 #endif 238 239 } 240 else if( type == KMEM_GENERIC ) // KHM allocator 241 { 242 // allocate memory from KHM 243 ptr = khm_alloc( &cluster->khm , size ); 244 if( ptr == NULL ) 245 { 246 printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 247 __FUNCTION__ , type , size , local_cxy ); 248 return NULL; 249 } 250 251 // reset memory if requested 252 if( flags & AF_ZERO ) memset( ptr , 0 , size ); 253 254 #if DEBUG_KMEM 255 cycle = (uint32_t)hal_get_cycles(); 256 if( DEBUG_KMEM < cycle ) 257 printk("\n[%s] thread[%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n", 
258 __FUNCTION__, this->process->pid, this->trdid, 259 kmem_type_str( type ), (intptr_t)ptr, size, cycle ); 260 #endif 261 262 } 263 else // KCM allocator 264 { 265 // initialize the KCM allocator if not already done 266 if( cluster->kcm_tbl[type] == NULL ) 267 { 268 // get lock protecting local kcm_tbl[] array 269 busylock_acquire( &cluster->kcm_lock ); 270 271 // create missing KCM 272 error_t error = kmem_create_kcm( type ); 273 274 // release lock protecting local kcm_tbl[] array 275 busylock_release( &cluster->kcm_lock ); 276 277 if ( error ) 278 { 279 printk("\n[ERROR] in %s : cannot create KCM type %d in cluster %x\n", 280 __FUNCTION__, type, local_cxy ); 281 return NULL; 282 } 283 } 284 285 // allocate memory from KCM 286 ptr = kcm_alloc( cluster->kcm_tbl[type] ); 287 if( ptr == NULL ) 288 { 289 printk("\n[ERROR] in %s : failed for type %d / size %d in cluster %x\n", 290 __FUNCTION__ , type , size , local_cxy ); 291 return NULL; 292 } 293 294 // reset memory if requested 295 if( flags & AF_ZERO ) memset( ptr , 0 , kmem_type_size( type ) ); 296 297 #if DEBUG_KMEM 298 cycle = (uint32_t)hal_get_cycles(); 299 if( DEBUG_KMEM < cycle ) 300 printk("\n[%s] thread [%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n", 301 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(type), (intptr_t)ptr, 302 kmem_type_size(type), cycle ); 303 #endif 304 305 } 306 307 return ptr; 308 } 126 order, ptr, local_cxy, cycle ); 127 #endif 128 return ptr; 129 } 130 else 131 { 132 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 133 return NULL; 134 } 135 } // end kmem_alloc() 309 136 310 137 ////////////////////////////////// 311 138 void kmem_free( kmem_req_t * req ) 312 139 { 313 if( req->type >= KMEM_TYPES_NR ) 314 { 315 assert( false , "illegal request type\n" ); 316 } 317 318 switch(req->type) 319 { 320 case KMEM_PAGE: 321 ppm_free_pages( (page_t*)req->ptr ); 322 return; 323 324 case KMEM_GENERIC: 325 khm_free( req->ptr ); 326 return; 327 328 default: 329 kcm_free( req->ptr ); 330 return; 331 } 332 } 333 140 uint32_t type = req->type; 141 142 if( type == KMEM_PPM ) 143 { 144 page_t * page = GET_PTR( ppm_base2page( XPTR( local_cxy , req->ptr ) ) ); 145 146 ppm_free_pages( page ); 147 } 148 else if( type == KMEM_KCM ) 149 { 150 kcm_free( req->ptr ); 151 } 152 else if( type == KMEM_KHM ) 153 { 154 khm_free( req->ptr ); 155 } 156 else 157 { 158 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 159 } 160 } // end kmem_free() 161 162 /////////////////////////////////////////// 163 void * kmem_remote_alloc( cxy_t cxy, 164 kmem_req_t * req ) 165 { 166 uint32_t type; // KMEM_PPM / KMEM_KCM / KMEM_KHM 167 uint32_t flags; // AF_ZERO / AF_KERNEL / AF_NONE 168 uint32_t order; // PPM: ln(pages) / KCM: ln(bytes) / KHM: bytes 169 170 type = req->type; 171 order = req->order; 172 flags = req->flags; 173 174 ///////////////////////////////// PPM 175 if( type == KMEM_PPM ) 176 { 177 // allocate the number of requested pages 178 page_t * page_ptr = ppm_remote_alloc_pages( cxy , order ); 179 180 if( page_ptr == NULL ) 181 { 182 printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n", 183 __FUNCTION__ , order , cxy ); 184 return NULL; 185 } 186 187 xptr_t page_xp = XPTR( cxy , page_ptr ); 188 189 // get pointer on buffer from the page descriptor 190 xptr_t base_xp = ppm_page2base( page_xp ); 191 192 // reset page if requested 193 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); 194 195 void * ptr = GET_PTR( base_xp ); 196 197 
#if DEBUG_KMEM_REMOTE 198 thread_t * this = CURRENT_THREAD; 199 uint32_t cycle = (uint32_t)hal_get_cycles(); 200 if( DEBUG_KMEM_REMOTE < cycle ) 201 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 202 __FUNCTION__, this->process->pid, this->trdid, 203 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), cxy, cycle ); 204 #endif 205 return ptr; 206 } 207 /////////////////////////////////// KCM 208 else if( type == KMEM_KCM ) 209 { 210 // allocate memory from KCM 211 void * ptr = kcm_remote_alloc( cxy , order ); 212 213 if( ptr == NULL ) 214 { 215 printk("\n[ERROR] in %s : failed for KCM / order %d in cluster %x\n", 216 __FUNCTION__ , order , cxy ); 217 return NULL; 218 } 219 220 // reset memory if requested 221 if( flags & AF_ZERO ) hal_remote_memset( XPTR( cxy , ptr ) , 0 , 1<<order ); 222 223 #if DEBUG_KMEM_REMOTE 224 thread_t * this = CURRENT_THREAD; 225 uint32_t cycle = (uint32_t)hal_get_cycles(); 226 if( DEBUG_KMEM_REMOTE < cycle ) 227 printk("\n[%s] thread [%x,%x] from KCM / %d bytes / base %x / cxy %x / cycle %d\n", 228 __FUNCTION__, this->process->pid, this->trdid, 229 1<<order, ptr, cxy, cycle ); 230 #endif 231 return ptr; 232 } 233 /////////////////////////////////// KHM 234 else if( type == KMEM_KHM ) 235 { 236 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ ); 237 return NULL; 238 } 239 else 240 { 241 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 242 return NULL; 243 } 244 } // kmem_remote_malloc() 245 246 //////////////////////////////////////// 247 void kmem_remote_free( cxy_t cxy, 248 kmem_req_t * req ) 249 { 250 uint32_t type = req->type; 251 252 if( type == KMEM_PPM ) 253 { 254 page_t * page = GET_PTR( ppm_base2page( XPTR( cxy , req->ptr ) ) ); 255 256 ppm_remote_free_pages( cxy , page ); 257 } 258 else if( type == KMEM_KCM ) 259 { 260 kcm_remote_free( cxy , req->ptr ); 261 } 262 else if( type == KMEM_KHM ) 263 { 264 printk("\n[ERROR] in %s : remote access not supported for KHM\n", __FUNCTION__ ); 265 } 266 else 267 { 268 printk("\n[ERROR] in %s : illegal allocator type\n", __FUNCTION__); 269 } 270 } // end kmem_remote_free() 271 272 273 -
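The three request types accept the same kmem_req_t but interpret the order field differently, as the dispatch above shows. The following kernel-side usage sketch summarizes the convention (NULL checks omitted for brevity; the KCM request mirrors the mapper.c hunk later in this changeset).

/* illustrative only : how a caller fills kmem_req_t for each allocator type */
void demo_kmem_usage( void )
{
    kmem_req_t req;
    void     * buf;

    /* two contiguous physical pages from the local PPM : order = ln(pages) */
    req.type  = KMEM_PPM;
    req.order = 1;
    req.flags = AF_KERNEL | AF_ZERO;
    buf = kmem_alloc( &req );              /* returns the buffer base, not the page_t */

    req.type  = KMEM_PPM;
    req.ptr   = buf;                       /* kmem_free() recovers the page descriptor */
    kmem_free( &req );

    /* one fixed-size object from the local KCM : order = ln(bytes) */
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(mapper_t) );
    req.flags = AF_KERNEL | AF_ZERO;
    buf = kmem_alloc( &req );

    req.type  = KMEM_KCM;
    req.ptr   = buf;
    kmem_free( &req );

    /* a 300 bytes buffer from the local KHM : order = size in bytes */
    req.type  = KMEM_KHM;
    req.order = 300;
    req.flags = AF_KERNEL;
    buf = kmem_alloc( &req );

    req.type  = KMEM_KHM;
    req.ptr   = buf;
    kmem_free( &req );
}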
trunk/kernel/mm/kmem.h
r619 r635 2 2 * kmem.h - kernel unified memory allocator interface 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 * Alain Greiner (2016,2017,2018) 4 * Authors Alain Greiner (2016,2017,2018,2019) 6 5 * 7 6 * Copyright (c) UPMC Sorbonne Universites … … 30 29 31 30 /************************************************************************************* 32 * This enum defines the Kernel Memory Types for dynamically allocated objects. 33 * WARNING : this enum must be kepts consistent with use in kmem.c file. 31 * This enum defines the three Kernel Memory Allocaror types: 34 32 ************************************************************************************/ 35 33 36 34 enum 37 35 { 38 KMEM_PAGE = 0, /*! reserved for PPM allocator */ 39 KMEM_GENERIC = 1, /*! reserved for KHM allocator */ 40 KMEM_KCM = 2, /*! kcm_t */ 41 KMEM_VSEG = 3, /*! vseg_t */ 42 KMEM_DEVICE = 4, /*! device_t */ 43 KMEM_MAPPER = 5, /*! mapper_t */ 44 KMEM_PROCESS = 6, /*! process_t */ 45 KMEM_CPU_CTX = 7, /*! hal_cpu_context_t */ 46 KMEM_FPU_CTX = 8, /*! hal_fpu_context_t */ 47 KMEM_GEN_BARRIER = 9, /*! generi_cbarrier_t */ 48 49 KMEM_SMP_BARRIER = 10, /*! simple_barrier_t */ 50 KMEM_DEVFS_CTX = 11, /*! fatfs_inode_t */ 51 KMEM_FATFS_CTX = 12, /*! fatfs_ctx_t */ 52 KMEM_VFS_CTX = 13, /*! vfs_context_t */ 53 KMEM_VFS_INODE = 14, /*! vfs_inode_t */ 54 KMEM_VFS_DENTRY = 15, /*! vfs_dentry_t */ 55 KMEM_VFS_FILE = 16, /*! vfs_file_t */ 56 KMEM_SEM = 17, /*! remote_sem_t */ 57 KMEM_CONDVAR = 18, /*! remote_condvar_t */ 58 KMEM_MUTEX = 19, /*! remote_mutex_t */ 59 60 KMEM_DIR = 20, /*! remote_dir_t */ 61 KMEM_512_BYTES = 21, /*! 512 bytes aligned */ 62 63 KMEM_TYPES_NR = 22, 36 KMEM_PPM = 0, /*! PPM allocator */ 37 KMEM_KCM = 1, /*! KCM allocator */ 38 KMEM_KHM = 2, /*! KHM allocator */ 64 39 }; 65 40 … … 79 54 typedef struct kmem_req_s 80 55 { 81 uint32_t type; /*! request type*/82 uint32_t size; /*! ln2(nb_pages) if PPM / bytes if KHM / unused by KCM*/56 uint32_t type; /*! KMEM_PPM / KMEM_KCM / KMEM_KHM */ 57 uint32_t order; /*! PPM: ln2(pages) / KCM: ln2(bytes) / KHM: bytes */ 83 58 uint32_t flags; /*! request attributes */ 84 59 void * ptr; /*! local pointer on allocated buffer (only used by free) */ … … 87 62 88 63 /************************************************************************************* 89 * Th is generic function allocates physical memory in the localcluster90 * as specified by the request descriptor.91 * It uses three specialised physical memory allocators, depending on request type:92 * - PPM (Physical Pages Manager) allocates N contiguous physical pages,93 * N must be a power of 2.94 * - K HM (Kernel Heap Manager) allocates a physical memory buffer,95 * that can have anysize.96 * - K CM (Kernel Cache Manager) allocates various fixed size objects,97 * handling a dedicated cache for each object type.64 * These two functions allocate physical memory in a local or remote cluster 65 * as specified by the kmem_req_t request descriptor, and return a local pointer 66 * on the allocated buffer. It uses three specialised physical memory allocators: 67 * - PPM (Physical Pages Manager) allocates N contiguous small physical pages. 68 * N is a power of 2, and req.order = ln(N). Implement the buddy algorithm. 69 * - KCM (Kernel Cache Manager) allocates aligned blocks of M bytes from a cache. 70 * M is a power of 2, and req.order = ln( M ). One cache per block size. 71 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes, 72 * M can have any value, and req.order = M. 
98 73 ************************************************************************************* 99 * @ req : local pointer to allocation request. 100 * @ return a local pointer on page descriptor if KMEM_PAGE. 101 * return a local pointer to allocated buffer if KCM or KHM. 102 * return NULL if no physical memory available. 74 * @ cxy : target cluster identifier for a remote access. 75 * @ req : local pointer on allocation request. 76 * @ return local pointer on allocated buffer if success / return NULL if no memory. 103 77 ************************************************************************************/ 104 78 void * kmem_alloc( kmem_req_t * req ); 105 79 80 void * kmem_remote_alloc( cxy_t cxy, 81 kmem_req_t * req ); 82 106 83 /************************************************************************************* 107 * Th is function releasespreviously allocated physical memory, as specified108 * by the "type" and "ptr" fiels of the kmem-req_t request.84 * These two functions release previously allocated physical memory, as specified 85 * by the <type> and <ptr> fields of the kmem_req_t request descriptor. 109 86 ************************************************************************************* 87 * @ cxy : target cluster identifier for a remote access. 110 88 * @ req : local pointer to request descriptor. 111 89 ************************************************************************************/ 112 90 void kmem_free ( kmem_req_t * req ); 113 91 114 /************************************************************************************* 115 * This function returns a printable string for a kmem object type. 116 ************************************************************************************* 117 * @ type : kmem object type. 118 ************************************************************************************/ 119 char * kmem_type_str( uint32_t type ); 120 121 /************************************************************************************* 122 * This function returns the size (bytes) for a kmem object type. 123 ************************************************************************************* 124 * @ type : kmem object type. 125 ************************************************************************************/ 126 uint32_t kmem_type_size( uint32_t type ); 127 128 /************************************************************************************* 129 * This function displays the content of the KCM pointers Table 130 ************************************************************************************/ 131 void kmem_print_kcm_table( void ); 92 void kmem_remote_free( cxy_t cxy, 93 kmem_req_t * req ); 132 94 133 95 -
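The remote variants take the same request descriptor plus a target cluster identifier. A hedged usage sketch with hypothetical demo_* wrappers is given below; note that the pointer returned by kmem_remote_alloc() is a local pointer inside the target cluster, so it must be accessed through hal_remote_* primitives or wrapped in an XPTR rather than dereferenced directly.

/* allocate a 512 bytes block in the cluster identified by <target_cxy> */
void * demo_remote_alloc_512( cxy_t target_cxy )
{
    kmem_req_t req;

    req.type  = KMEM_KCM;
    req.order = 9;                       /* ln(512) */
    req.flags = AF_KERNEL | AF_ZERO;

    return kmem_remote_alloc( target_cxy , &req );
}

/* release it later, from any cluster */
void demo_remote_free_512( cxy_t target_cxy , void * ptr )
{
    kmem_req_t req;

    req.type = KMEM_KCM;
    req.ptr  = ptr;                      /* local pointer inside the target cluster */

    kmem_remote_free( target_cxy , &req );
}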
trunk/kernel/mm/mapper.c
r628 r635 52 52 error_t error; 53 53 54 // allocate memory for mapper 55 req.type = KMEM_ MAPPER;56 req. size = sizeof(mapper_t);54 // allocate memory for mapper descriptor 55 req.type = KMEM_KCM; 56 req.order = bits_log2( sizeof(mapper_t) ); 57 57 req.flags = AF_KERNEL | AF_ZERO; 58 mapper = (mapper_t *)kmem_alloc( &req );58 mapper = kmem_alloc( &req ); 59 59 60 60 if( mapper == NULL ) … … 73 73 CONFIG_MAPPER_GRDXT_W2, 74 74 CONFIG_MAPPER_GRDXT_W3 ); 75 76 75 if( error ) 77 76 { 78 77 printk("\n[ERROR] in %s : cannot initialize radix tree\n", __FUNCTION__ ); 79 req.type = KMEM_ MAPPER;78 req.type = KMEM_KCM; 80 79 req.ptr = mapper; 81 80 kmem_free( &req ); … … 117 116 { 118 117 // remove page from mapper and release to PPM 119 mapper_re lease_page( mapper, page );118 mapper_remote_release_page( XPTR( local_cxy , mapper ) , page ); 120 119 121 120 // update start_key value for next page … … 129 128 130 129 // release memory for mapper descriptor 131 req.type = KMEM_ MAPPER;130 req.type = KMEM_KCM; 132 131 req.ptr = mapper; 133 132 kmem_free( &req ); 134 133 135 134 } // end mapper_destroy() 135 136 //////////////////////////////////////////////////////// 137 error_t mapper_remote_handle_miss( xptr_t mapper_xp, 138 uint32_t page_id, 139 xptr_t * page_xp_ptr ) 140 { 141 error_t error; 142 143 thread_t * this = CURRENT_THREAD; 144 145 // get target mapper cluster and local pointer 146 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 147 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 148 149 #if DEBUG_MAPPER_HANDLE_MISS 150 uint32_t cycle = (uint32_t)hal_get_cycles(); 151 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 152 vfs_inode_t * inode = mapper->inode; 153 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 154 { 155 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 156 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cluster %x / cycle %d", 157 __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle ); 158 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name ); 159 } 160 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 161 { 162 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cluster %x / cycle %d", 163 __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle ); 164 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" ); 165 } 166 #endif 167 168 // allocate one 4 Kbytes page from the remote mapper cluster 169 page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 ); 170 171 if( page_ptr == NULL ) 172 { 173 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n", 174 __FUNCTION__ , this->process->pid, this->trdid , mapper_cxy ); 175 return -1; 176 } 177 178 // build extended pointer on new page descriptor 179 xptr_t page_xp = XPTR( mapper_cxy , page_ptr ); 180 181 // initialize the page descriptor 182 page_remote_init( page_xp ); 183 184 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1 ); 185 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index ) , page_id ); 186 hal_remote_spt( XPTR( mapper_cxy , &page_ptr->mapper ) , mapper_ptr ); 187 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->flags ) , PG_INIT ); 188 189 // insert page in mapper radix tree 190 error = grdxt_remote_insert( XPTR( mapper_cxy , &mapper_ptr->rt), 191 page_id, 192 page_ptr ); 193 194 if( error ) 195 { 196 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n", 197 __FUNCTION__ , this->process->pid, this->trdid ); 198 ppm_remote_free_pages( 
mapper_cxy , page_ptr ); 199 return -1; 200 } 201 202 // launch I/O operation to load page from IOC device to mapper 203 error = vfs_fs_move_page( page_xp , IOC_SYNC_READ ); 204 205 if( error ) 206 { 207 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 208 __FUNCTION__ , this->process->pid, this->trdid ); 209 mapper_remote_release_page( mapper_xp , page_ptr ); 210 return -1; 211 } 212 213 // return extended pointer on allocated page 214 *page_xp_ptr = page_xp; 215 216 #if DEBUG_MAPPER_HANDLE_MISS 217 cycle = (uint32_t)hal_get_cycles(); 218 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 219 { 220 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d", 221 __FUNCTION__, this->process->pid, this->trdid, 222 page_id, name, ppm_page2ppn( page_xp ), cycle ); 223 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name ); 224 } 225 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 226 { 227 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d", 228 __FUNCTION__, this->process->pid, this->trdid, 229 page_id, ppm_page2ppn( page_xp ), cycle ); 230 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" ); 231 } 232 #endif 233 234 return 0; 235 236 } // end mapper_remote_handle_miss() 136 237 137 238 //////////////////////////////////////////////////// … … 183 284 184 285 // test mapper miss 185 if( page_xp == XPTR_NULL ) // miss => try tohandle it286 if( page_xp == XPTR_NULL ) // miss => handle it 186 287 { 187 288 // release the lock in READ_MODE and take it in WRITE_MODE … … 196 297 if ( page_xp == XPTR_NULL ) // miss confirmed => handle it 197 298 { 198 199 if( mapper_cxy == local_cxy ) // mapper is local 200 { 201 202 #if (DEBUG_MAPPER_GET_PAGE & 1) 203 if( DEBUG_MAPPER_GET_PAGE < cycle ) 204 printk("\n[%s] missing page => load it from FS / local access \n", __FUNCTION__ ); 205 #endif 206 error = mapper_handle_miss( mapper_ptr, 207 page_id, 208 &page_xp ); 209 } 210 else 211 { 212 213 #if (DEBUG_MAPPER_GET_PAGE & 1) 214 if( DEBUG_MAPPER_GET_PAGE < cycle ) 215 printk("\n[%s] missing page => load it from FS / RPC access \n", __FUNCTION__ ); 216 #endif 217 rpc_mapper_handle_miss_client( mapper_cxy, 218 mapper_ptr, 219 page_id, 220 &page_xp, 221 &error ); 222 } 223 224 if ( error ) 299 error = mapper_remote_handle_miss( mapper_xp, 300 page_id, 301 &page_xp ); 302 if( error ) 225 303 { 226 304 printk("\n[ERROR] in %s : thread[%x,%x] cannot handle mapper miss\n", … … 230 308 } 231 309 } 310 311 #if (DEBUG_MAPPER_GET_PAGE & 1) 312 if( DEBUG_MAPPER_GET_PAGE < cycle ) 313 printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n", 314 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) ); 315 #endif 232 316 233 317 // release mapper lock from WRITE_MODE … … 260 344 } // end mapper_remote_get_page() 261 345 262 ////////////////////////////////////////////// 263 error_t mapper_handle_miss( mapper_t * mapper, 264 uint32_t page_id, 265 xptr_t * page_xp ) 266 { 267 kmem_req_t req; 268 page_t * page; 269 error_t error; 270 271 thread_t * this = CURRENT_THREAD; 272 273 #if DEBUG_MAPPER_HANDLE_MISS 274 uint32_t cycle = (uint32_t)hal_get_cycles(); 275 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 276 vfs_inode_t * inode = mapper->inode; 277 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 278 { 279 vfs_inode_get_name( XPTR( local_cxy , inode ) , name ); 280 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cycle %d", 281 __FUNCTION__, 
this->process->pid, this->trdid, page_id, name, cycle ); 282 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name ); 283 } 284 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 285 { 286 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cycle %d", 287 __FUNCTION__, this->process->pid, this->trdid, page_id, cycle ); 288 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" ); 289 } 290 #endif 291 292 // allocate one page from the local cluster 293 req.type = KMEM_PAGE; 294 req.size = 0; 295 req.flags = AF_NONE; 296 page = kmem_alloc( &req ); 297 298 if( page == NULL ) 299 { 300 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n", 301 __FUNCTION__ , this->process->pid, this->trdid , local_cxy ); 302 return -1; 303 } 304 305 // initialize the page descriptor 306 page_init( page ); 307 page_set_flag( page , PG_INIT ); 308 page_refcount_up( page ); 309 page->mapper = mapper; 310 page->index = page_id; 311 312 // insert page in mapper radix tree 313 error = grdxt_insert( &mapper->rt , page_id , page ); 314 315 if( error ) 316 { 317 printk("\n[ERROR] in %s : thread[%x,%x] cannot insert page in mapper\n", 318 __FUNCTION__ , this->process->pid, this->trdid ); 319 mapper_release_page( mapper , page ); 320 req.ptr = page; 321 req.type = KMEM_PAGE; 322 kmem_free(&req); 323 return -1; 324 } 325 326 // launch I/O operation to load page from IOC device to mapper 327 error = vfs_fs_move_page( XPTR( local_cxy , page ) , IOC_SYNC_READ ); 328 329 if( error ) 330 { 331 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 332 __FUNCTION__ , this->process->pid, this->trdid ); 333 mapper_release_page( mapper , page ); 334 req.ptr = page; 335 req.type = KMEM_PAGE; 336 kmem_free( &req ); 337 return -1; 338 } 339 340 // set extended pointer on allocated page 341 *page_xp = XPTR( local_cxy , page ); 342 343 #if DEBUG_MAPPER_HANDLE_MISS 344 cycle = (uint32_t)hal_get_cycles(); 345 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 346 { 347 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d", 348 __FUNCTION__, this->process->pid, this->trdid, 349 page_id, name, ppm_page2ppn( *page_xp ), cycle ); 350 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name ); 351 } 352 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 353 { 354 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d", 355 __FUNCTION__, this->process->pid, this->trdid, 356 page_id, ppm_page2ppn( *page_xp ), cycle ); 357 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" ); 358 } 359 #endif 360 361 return 0; 362 363 } // end mapper_handle_miss() 364 365 //////////////////////////////////////////// 366 void mapper_release_page( mapper_t * mapper, 367 page_t * page ) 368 { 346 //////////////////////////////////////////////////// 347 void mapper_remote_release_page( xptr_t mapper_xp, 348 page_t * page ) 349 { 350 // get mapper cluster an local pointer 351 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 352 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 353 369 354 // build extended pointer on mapper lock 370 xptr_t mapper_lock_xp = XPTR( local_cxy , &mapper->lock );355 xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock ); 371 356 372 357 // take mapper lock in WRITE_MODE 373 remote_rwlock_wr_acquire( mapper_lock_xp );358 remote_rwlock_wr_acquire( lock_xp ); 374 359 375 360 // remove physical page from radix tree 376 
grdxt_remove( &mapper->rt, page->index );
361 grdxt_remote_remove( XPTR( mapper_cxy , &mapper_ptr->rt ) , page->index );
377 362
378 363 // release mapper lock from WRITE_MODE
379 remote_rwlock_wr_release( mapper_lock_xp );
364 remote_rwlock_wr_release( lock_xp );
380 365
381 366 // release page to PPM
382 kmem_req_t req;
383 req.type = KMEM_PAGE;
384 req.ptr = page;
385 kmem_free( &req );
386
367 ppm_remote_free_pages( mapper_cxy , page );
368
387 369 } // end mapper_remote_release_page()
388 370
-
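For readers of the mapper_remote_get_page() hunk above: mapper_remote_handle_miss() must be called with the mapper rwlock held in WRITE_MODE. A minimal illustrative wrapper is sketched below; the demo_* name is hypothetical and not part of the changeset.

/* load page <page_id> of a possibly remote mapper : the caller is expected to hold */
/* the mapper rwlock in WRITE_MODE, as mapper_remote_get_page() does above          */
static error_t demo_load_missing_page( xptr_t     mapper_xp,
                                       uint32_t   page_id,
                                       xptr_t   * page_xp )
{
    error_t error = mapper_remote_handle_miss( mapper_xp , page_id , page_xp );

    if( error )
    {
        printk("\n[ERROR] in %s : cannot load page %d from IOC\n", __FUNCTION__ , page_id );
        return -1;
    }

    printk("\n[%s] loaded page %d / ppn %x\n",
           __FUNCTION__ , page_id , ppm_page2ppn( *page_xp ) );

    return 0;
}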
trunk/kernel/mm/mapper.h
r628 r635 64 64 * TODO (1) the mapper being only used to implement the VFS cache(s), the mapper.c 65 65 * and mapper.h file should be trandfered to the fs directory. 66 * TODO (2) the "type" field i s probably unused...66 * TODO (2) the "type" field in mapper descriptor is redundant and probably unused. 67 67 ******************************************************************************************/ 68 68 … … 106 106 107 107 /******************************************************************************************* 108 * This function load from device a missing page identified by the <page_id> argument109 * into the mapper identified by the <mapper> local pointer.110 * It allocates a physical page from the local cluster, initialise by accessing device,111 * and register the page in the mapper radix tree.112 * It must be executed by a thread running in the cluster containing the mapper.108 * This function load from the IOC device a missing page identified by the <page_id> 109 * argument into a - possibly remote - mapper identified by the <mapper_xp> argument. 110 * It can be executed by a thread running in any cluster. 111 * It allocates a physical page from the remote cluster PPM, initialises it by accessing 112 * the IOC device, and registers the page in the remote mapper radix tree. 113 113 * WARNING : the calling function mapper_remote_get_page() is supposed to take and release 114 114 * the lock protecting the mapper in WRITE_MODE. 115 115 ******************************************************************************************* 116 * @ mapper : [in] target mapper. 117 * @ page_id : [in] missing page index in file. 118 * @ page_xp : [out] buffer for extended pointer on missing page descriptor. 119 * @ return 0 if success / return -1 if a dirty page cannot be updated on device. 120 ******************************************************************************************/ 121 error_t mapper_handle_miss( mapper_t * mapper, 122 uint32_t page_id, 123 xptr_t * page_xp ); 116 * @ mapper_xp : [in] extended pointer on remote mapper. 117 * @ page_id : [in] missing page index in file. 118 * @ page_xp : [out] buffer for extended pointer on missing page descriptor. 119 * @ return 0 if success / return -1 if IOC cannot be accessed. 120 ******************************************************************************************/ 121 error_t mapper_remote_handle_miss( xptr_t mapper_xp, 122 uint32_t page_id, 123 xptr_t * page_xp ); 124 125 /******************************************************************************************* 126 * This function removes a physical page from a - possibly remote - mapper, 127 * and releases the page to the remote PPM. 128 * It can be executed by any thread running in any cluster. 129 * It takes the mapper lock in WRITE_MODE to update the mapper. 130 ******************************************************************************************* 131 * @ mapper : extended pointer on the remote mapper. 132 * @ page : local pointer on the page in remote mapper. 133 ******************************************************************************************/ 134 void mapper_remote_release_page( xptr_t mapper_xp, 135 struct page_s * page ); 124 136 125 137 /******************************************************************************************* … … 170 182 171 183 /******************************************************************************************* 172 * This function removes a physical page from the mapper, and releases173 * the page to the local PPM. 
It is called by the mapper_destroy() function.174 * It must be executed by a thread running in the cluster containing the mapper.175 * It takes the mapper lock in WRITE_MODE to update the mapper.176 *******************************************************************************************177 * @ mapper : local pointer on the mapper.178 * @ page : pointer on page to remove.179 ******************************************************************************************/180 void mapper_release_page( mapper_t * mapper,181 struct page_s * page );182 183 /*******************************************************************************************184 184 * This function returns an extended pointer on a page descriptor. 185 185 * The - possibly remote - mapper is identified by the <mapper_xp> argument. … … 237 237 /******************************************************************************************* 238 238 * This function scan all pages present in the mapper identified by the <mapper> argument, 239 * and synchronize all pages ma ked asdirty" on disk.239 * and synchronize all pages marked as "dirty" on disk. 240 240 * These pages are unmarked and removed from the local PPM dirty_list. 241 241 * This function must be called by a local thread running in same cluster as the mapper. -
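A caller-side sketch of the new interface may help: mapper_remote_handle_miss() is meant to be invoked with the mapper lock already held in WRITE_MODE, as the warning in the header above states. The "lock" field name and the write-mode rwlock accessors below are assumptions (only the read-mode variants appear in this changeset excerpt), so this is an illustration of the documented contract, not code from mapper.c:

    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
    xptr_t     lock_xp    = XPTR( mapper_cxy , &mapper_ptr->lock );  // assumed field name
    xptr_t     page_xp;
    error_t    error;

    remote_rwlock_wr_acquire( lock_xp );              // caller takes the lock in WRITE_MODE

    error = mapper_remote_handle_miss( mapper_xp,     // possibly remote mapper
                                       page_id,       // missing page index in file
                                       &page_xp );    // [out] missing page descriptor

    remote_rwlock_wr_release( lock_xp );              // caller releases the lock

    if( error ) printk("\n[ERROR] in %s : cannot access IOC device\n", __FUNCTION__ );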
trunk/kernel/mm/page.c
r634 r635 46 46 47 47 remote_busylock_init( XPTR( local_cxy , &page->lock ), LOCK_PAGE_STATE ); 48 49 list_entry_init( &page->list );50 48 } 51 49 … … 93 91 } 94 92 93 94 95 96 /////////////////////////////////////////////// 97 inline void page_remote_init( xptr_t page_xp ) 98 { 99 hal_remote_memset( page_xp , 0 , sizeof(page_t) ); 100 101 cxy_t page_cxy = GET_CXY( page_xp ); 102 page_t * page_ptr = GET_PTR( page_xp ); 103 104 remote_busylock_init( XPTR( page_cxy , &page_ptr->lock ), LOCK_PAGE_STATE ); 105 } 95 106 96 107 //////////////////////////////////////////////////// -
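A short usage sketch for the new page_remote_init() accessor; how the remote descriptor pointer is obtained is left abstract here, as it depends on the caller:

    cxy_t    page_cxy;    // cluster owning the page descriptor (set by the caller)
    page_t * page_ptr;    // local pointer on the descriptor in that cluster (set by the caller)

    // zeroes the remote descriptor and initialises its LOCK_PAGE_STATE busylock
    page_remote_init( XPTR( page_cxy , page_ptr ) );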
trunk/kernel/mm/page.h
r632 r635 48 48 * This structure defines a physical page descriptor. 49 49 * - The remote_busylock is used to allows any remote thread to atomically 50 * test/modify the forks counter or the pageflags.50 * test/modify the forks counter or the flags. 51 51 * - The list entry is used to register the page in a free list or in dirty list. 52 52 * The refcount is used for page release to KMEM. … … 133 133 134 134 135 136 /************************************************************************************* 137 * This function must be called by a thread running in the local cluster. 138 * It initializes the page descriptor. 139 ************************************************************************************* 140 * @ page_xp : extended pointer to page descriptor. 141 ************************************************************************************/ 142 inline void page_remote_init( xptr_t page_xp ); 143 135 144 /************************************************************************************* 136 145 * This function can be called by any thread running in any cluster. -
trunk/kernel/mm/ppm.c
r634 r635 212 212 page_t * found_block; 213 213 214 thread_t * this = CURRENT_THREAD; 215 214 216 #if DEBUG_PPM_ALLOC_PAGES 215 thread_t * this = CURRENT_THREAD;216 217 uint32_t cycle = (uint32_t)hal_get_cycles(); 217 218 if( DEBUG_PPM_ALLOC_PAGES < cycle ) … … 237 238 238 239 current_block = NULL; 239 240 // find a free block equal or larger to requested size 241 for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ ) 242 { 243 if( !list_is_empty( &ppm->free_pages_root[current_order] ) ) 240 current_order = order; 241 242 // search a free block equal or larger than requested size 243 while( current_order < CONFIG_PPM_MAX_ORDER ) 244 { 245 // get local pointer on the root of relevant free_list (same in all clusters) 246 list_entry_t * root = &ppm->free_pages_root[current_order]; 247 248 if( !list_is_empty( root ) ) 244 249 { 245 250 // get first free block in this free_list 246 current_block = LIST_FIRST( &ppm->free_pages_root[current_order], page_t , list );251 current_block = LIST_FIRST( root , page_t , list ); 247 252 248 253 // remove this block from this free_list 249 254 list_unlink( ¤t_block->list ); 255 ppm->free_pages_nr[current_order] --; 250 256 251 257 // register pointer on found block 252 258 found_block = current_block; 253 259 254 // update this free-list number of blocks255 ppm->free_pages_nr[current_order] --;256 257 260 // compute found block size 258 261 current_size = (1 << current_order); … … 260 263 break; 261 264 } 265 266 // increment loop index 267 current_order++; 262 268 } 263 269 … … 267 273 remote_busylock_release( lock_xp ); 268 274 269 #if DEBUG_PPM_ALLOC_PAGES 270 cycle = (uint32_t)hal_get_cycles(); 271 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 272 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 273 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle ); 274 #endif 275 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x\n", 276 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy ); 275 277 276 278 return NULL; … … 282 284 while( current_order > order ) 283 285 { 286 // update size and order 284 287 current_order --; 285 286 // update pointer, size, and order fiels for new free block287 288 current_size >>= 1; 289 290 // update order fiels in new free block 288 291 current_block = found_block + current_size; 289 292 current_block->order = current_order; … … 291 294 // insert new free block in relevant free_list 292 295 list_add_first( &ppm->free_pages_root[current_order] , ¤t_block->list ); 293 294 // update number of blocks in free list295 296 ppm->free_pages_nr[current_order] ++; 296 297 } … … 312 313 printk("\n[%s] thread[%x,%x] exit for %d page(s) in cluster %x / ppn = %x / cycle %d\n", 313 314 __FUNCTION__, this->process->pid, this->trdid, 314 1<<order, ppm_page2ppn(XPTR( local_cxy , found_block )), local_cxy, cycle );315 1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle ); 315 316 #endif 316 317 … … 374 375 375 376 ///////////////////////////////////////////// 376 xptr_tppm_remote_alloc_pages( cxy_t cxy,377 void * ppm_remote_alloc_pages( cxy_t cxy, 377 378 uint32_t order ) 378 379 { … … 382 383 page_t * found_block; 383 384 385 thread_t * this = CURRENT_THREAD; 386 384 387 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 385 thread_t * this = CURRENT_THREAD;386 388 uint32_t cycle = (uint32_t)hal_get_cycles(); 387 389 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle ) … … 408 410 409 411 current_block = NULL; 410 411 // find in remote 
cluster a free block equal or larger to requested size 412 for( current_order = order ; current_order < CONFIG_PPM_MAX_ORDER ; current_order ++ ) 413 { 414 // get local pointer on the root of relevant free_list in remote cluster 412 current_order = order; 413 414 // search a free block equal or larger than requested size 415 while( current_order < CONFIG_PPM_MAX_ORDER ) 416 { 417 // get local pointer on the root of relevant free_list (same in all clusters) 415 418 list_entry_t * root = &ppm->free_pages_root[current_order]; 416 419 417 if( !list_remote_is_empty( cxy , root ) ) 420 if( !list_remote_is_empty( cxy , root ) ) // list non empty => success 418 421 { 419 422 // get local pointer on first free page descriptor in remote cluster … … 422 425 // remove first free page from the free-list in remote cluster 423 426 list_remote_unlink( cxy , ¤t_block->list ); 427 hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 ); 424 428 425 429 // register found block 426 430 found_block = current_block; 427 431 428 // decrement relevant free-list number of items in remote cluster429 hal_remote_atomic_add( XPTR( cxy , &ppm->free_pages_nr[current_order] ), -1 );430 431 432 // compute found block size 432 433 current_size = (1 << current_order); … … 434 435 break; 435 436 } 437 438 // increment loop index 439 current_order++; 436 440 } 437 441 … … 441 445 remote_busylock_release( lock_xp ); 442 446 443 #if DEBUG_REMOTE_PPM_ALLOC_PAGES 444 cycle = (uint32_t)hal_get_cycles(); 445 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle ) 446 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) in cluster %x / cycle %d\n", 447 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 448 #endif 447 printk("\n[ERROR] in %s : thread[%x,%x] cannot allocate %d page(s) in cluster %x\n", 448 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy ); 449 449 450 450 return XPTR_NULL; … … 455 455 while( current_order > order ) 456 456 { 457 // update order , size, and local pointer for new free block457 // update order and size 458 458 current_order --; 459 459 current_size >>= 1; 460 461 // update new free block order field in remote cluster 460 462 current_block = found_block + current_size; 461 462 // update new free block order field in remote cluster463 463 hal_remote_s32( XPTR( cxy , ¤t_block->order ) , current_order ); 464 464 … … 497 497 #endif 498 498 499 return XPTR( cxy , found_block );499 return found_block; 500 500 501 501 } // end ppm_remote_alloc_pages() -
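The rewritten allocation path first searches the free lists from the requested order upward, then splits the found block back down, releasing the unused upper halves. A stand-alone simulation of that split loop, using plain integers instead of page descriptors, shows which buddy blocks return to the free lists (illustration only, no kernel types involved):

    #include <stdio.h>

    int main( void )
    {
        unsigned int order         = 2;          // requested block : 2^2 = 4 pages
        unsigned int current_order = 5;          // found block     : 2^5 = 32 pages
        unsigned int found_index   = 0;          // index of first page in found block
        unsigned int current_size  = 1 << current_order;

        while( current_order > order )
        {
            current_order --;
            current_size >>= 1;

            // the upper half becomes a new free block of 2^current_order pages
            printf("release block : first page %u / order %u\n",
                   found_index + current_size , current_order );
        }

        printf("allocated block : first page %u / order %u\n", found_index , order );
        return 0;
    }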
trunk/kernel/mm/ppm.h
r632 r635 123 123 * @ cxy : remote cluster identifier. 124 124 * @ order : ln2( number of 4 Kbytes pages) 125 * @ returns a n extended pointer on the page descriptor if success / XPTR_NULL if error.126 ****************************************************************************************/ 127 xptr_tppm_remote_alloc_pages( cxy_t cxy,125 * @ returns a local pointer on remote page descriptor if success / XPTR_NULL if error. 126 ****************************************************************************************/ 127 void * ppm_remote_alloc_pages( cxy_t cxy, 128 128 uint32_t order ); 129 129 -
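Since ppm_remote_alloc_pages() now returns a local pointer on the remote page descriptor rather than an extended pointer, callers build the xptr themselves, as vmm_page_allocate() does later in this changeset:

    page_t * page_ptr;
    xptr_t   page_xp;

    // allocate one small physical page from the target cluster
    page_ptr = ppm_remote_alloc_pages( page_cxy , 0 );

    // build the extended pointer on the remote page descriptor
    page_xp = XPTR( page_cxy , page_ptr );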
trunk/kernel/mm/vmm.c
r634 r635 49 49 #include <hal_exception.h> 50 50 51 ////////////////////////////////////////////////////////////////////////////////// 51 //////////////////////////////////////////////////////////////////////////////////////////// 52 52 // Extern global variables 53 ////////////////////////////////////////////////////////////////////////////////// 53 //////////////////////////////////////////////////////////////////////////////////////////// 54 54 55 55 extern process_t process_zero; // allocated in cluster.c … … 286 286 } // end vmm_detach_from_vsl() 287 287 288 289 290 291 288 //////////////////////////////////////////// 292 289 error_t vmm_user_init( process_t * process ) 293 290 { 294 vseg_t * vseg_args;295 vseg_t * vseg_envs;296 intptr_t base;297 intptr_t size;298 291 uint32_t i; 299 292 … … 319 312 "STACK zone too small\n"); 320 313 314 // initialize the lock protecting the VSL 315 remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL ); 316 317 318 /* 321 319 // register "args" vseg in VSL 322 320 base = CONFIG_VMM_UTILS_BASE << CONFIG_PPM_PAGE_SHIFT; … … 358 356 359 357 vmm->envs_vpn_base = base; 360 358 */ 361 359 // initialize STACK allocator 362 360 vmm->stack_mgr.bitmap = 0; … … 375 373 376 374 // initialize instrumentation counters 377 vmm->pgfault_nr = 0; 375 vmm->false_pgfault_nr = 0; 376 vmm->local_pgfault_nr = 0; 377 vmm->global_pgfault_nr = 0; 378 vmm->false_pgfault_cost = 0; 379 vmm->local_pgfault_cost = 0; 380 vmm->global_pgfault_cost = 0; 378 381 379 382 hal_fence(); … … 398 401 399 402 #if DEBUG_VMM_USER_RESET 400 uint32_t cycle = (uint32_t)hal_get_cycles();403 uint32_t cycle; 401 404 thread_t * this = CURRENT_THREAD; 405 #endif 406 407 #if (DEBUG_VMM_USER_RESET & 1 ) 408 cycle = (uint32_t)hal_get_cycles(); 402 409 if( DEBUG_VMM_USER_RESET < cycle ) 403 410 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", … … 407 414 #if (DEBUG_VMM_USER_RESET & 1 ) 408 415 if( DEBUG_VMM_USER_RESET < cycle ) 409 hal_vmm_display( process, true );416 hal_vmm_display( XPTR( local_cxy , process ) , true ); 410 417 #endif 411 418 … … 478 485 #endif 479 486 487 #if (DEBUG_VMM_USER_RESET & 1 ) 488 if( DEBUG_VMM_USER_RESET < cycle ) 489 hal_vmm_display( XPTR( local_cxy , process ) , true ); 490 #endif 491 480 492 } // end vmm_user_reset() 481 493 … … 503 515 thread_t * this = CURRENT_THREAD; 504 516 if( DEBUG_VMM_UPDATE_PTE < cycle ) 505 printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / cycle %d\n", 506 __FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle ); 507 #endif 508 509 // check cluster is reference 510 assert( (GET_CXY( process->ref_xp ) == local_cxy) , "not called in reference cluster\n"); 517 printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / ycle %d\n", 518 __FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle ); 519 #endif 511 520 512 521 // get extended pointer on root of process copies xlist in owner cluster … … 517 526 process_root_xp = XPTR( owner_cxy , process_root_ptr ); 518 527 528 // check local cluster is owner cluster 529 assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n"); 530 519 531 // loop on destination process copies 520 532 XLIST_FOREACH( process_root_xp , process_iter_xp ) … … 525 537 remote_process_cxy = GET_CXY( remote_process_xp ); 526 538 527 #if (DEBUG_VMM_UPDATE_PTE & 0x1)539 #if (DEBUG_VMM_UPDATE_PTE & 1) 528 540 if( DEBUG_VMM_UPDATE_PTE < cycle ) 529 printk("\n[%s] thread r[%x,%x] handling vpn %x for 
process %x in cluster %x\n",541 printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n", 530 542 __FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy ); 531 543 #endif … … 545 557 #endif 546 558 559 #if (DEBUG_VMM_UPDATE_PTE & 1) 560 hal_vmm_display( process , true ); 561 #endif 562 547 563 } // end vmm_global_update_pte() 548 564 … … 570 586 cxy_t owner_cxy; 571 587 lpid_t owner_lpid; 588 589 // get target process PID 590 pid = process->pid; 572 591 573 592 #if DEBUG_VMM_SET_COW … … 576 595 if( DEBUG_VMM_SET_COW < cycle ) 577 596 printk("\n[%s] thread[%x,%x] enter for process %x / cycle %d\n", 578 __FUNCTION__, this->process->pid, this->trdid, process->pid , cycle ); 597 __FUNCTION__, this->process->pid, this->trdid, pid , cycle ); 598 #endif 599 600 #if (DEBUG_VMM_SET_COW & 1) 601 if( DEBUG_VMM_SET_COW < cycle ) 602 hal_vmm_display( process , true ); 579 603 #endif 580 604 581 605 // check cluster is reference 582 assert( ( GET_CXY( process->ref_xp ) == local_cxy),583 "local cluster is notprocess reference cluster\n");606 assert( (XPTR( local_cxy , process ) == process->ref_xp), 607 "local cluster must be process reference cluster\n"); 584 608 585 609 // get pointer on reference VMM … … 587 611 588 612 // get extended pointer on root of process copies xlist in owner cluster 589 pid = process->pid;590 613 owner_cxy = CXY_FROM_PID( pid ); 591 614 owner_lpid = LPID_FROM_PID( pid ); … … 596 619 vseg_root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 597 620 598 // loop on destinationprocess copies621 // loop on target process copies 599 622 XLIST_FOREACH( process_root_xp , process_iter_xp ) 600 623 { 601 // get cluster and local pointer on remote process 624 // get cluster and local pointer on remote process copy 602 625 remote_process_xp = XLIST_ELEMENT( process_iter_xp , process_t , copies_list ); 603 626 remote_process_ptr = GET_PTR( remote_process_xp ); … … 606 629 #if (DEBUG_VMM_SET_COW & 1) 607 630 if( DEBUG_VMM_SET_COW < cycle ) 608 printk("\n[%s] thread[%x,%x] handlingprocess %x in cluster %x\n",609 __FUNCTION__, this->process->pid, this->trdid, process->pid, remote_process_cxy );631 printk("\n[%s] thread[%x,%x] (%x) handles process %x in cluster %x\n", 632 __FUNCTION__, this->process->pid, this->trdid, this, pid, remote_process_cxy ); 610 633 #endif 611 634 … … 620 643 vseg = GET_PTR( vseg_xp ); 621 644 622 assert( (GET_CXY( vseg_xp ) == local_cxy) ,623 "all vsegs in reference VSL must be local\n" );624 625 645 // get vseg type, base and size 626 646 uint32_t type = vseg->type; … … 630 650 #if (DEBUG_VMM_SET_COW & 1) 631 651 if( DEBUG_VMM_SET_COW < cycle ) 632 printk("\n[%s] thread[%x,%x] handlingvseg %s / vpn_base = %x / vpn_size = %x\n",652 printk("\n[%s] thread[%x,%x] found vseg %s / vpn_base = %x / vpn_size = %x\n", 633 653 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); 634 654 #endif … … 653 673 654 674 // atomically increment pending forks counter in physical pages, 655 // for all vseg pages that are mapped in reference cluster675 // this is only done once, when handling the reference copy 656 676 if( remote_process_cxy == local_cxy ) 657 677 { 678 679 #if (DEBUG_VMM_SET_COW & 1) 680 if( DEBUG_VMM_SET_COW < cycle ) 681 printk("\n[%s] thread[%x,%x] handles vseg %s / vpn_base = %x / vpn_size = %x\n", 682 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); 683 #endif 658 684 // scan all pages in vseg 659 685 for( vpn = vpn_base ; vpn < (vpn_base + 
vpn_size) ; vpn++ ) … … 684 710 } 685 711 } // end loop on vpn 712 713 #if (DEBUG_VMM_SET_COW & 1) 714 if( DEBUG_VMM_SET_COW < cycle ) 715 printk("\n[%s] thread[%x,%x] completes vseg %s / vpn_base = %x / vpn_size = %x\n", 716 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), vpn_base, vpn_size ); 717 #endif 686 718 } // end if local 687 719 } // end if vseg type … … 713 745 vseg_t * child_vseg; 714 746 uint32_t type; 715 bool_t cow;716 747 vpn_t vpn; 717 748 vpn_t vpn_base; 718 749 vpn_t vpn_size; 719 xptr_t page_xp; // extended pointer on page descriptor720 page_t * page_ptr;721 cxy_t page_cxy;722 xptr_t forks_xp; // extended pointer on forks counter in page descriptor723 750 xptr_t parent_root_xp; 724 751 bool_t mapped; … … 740 767 parent_vmm = &parent_process->vmm; 741 768 child_vmm = &child_process->vmm; 742 743 // initialize the lock protecting the child VSL744 remote_rwlock_init( XPTR( local_cxy , &child_vmm->vsl_lock ) , LOCK_VMM_VSL );745 746 // initialize the child VSL as empty747 xlist_root_init( XPTR( local_cxy, &child_vmm->vsegs_root ) );748 child_vmm->vsegs_nr = 0;749 750 // create an empty child GPT751 error = hal_gpt_create( &child_vmm->gpt );752 if( error )753 {754 printk("\n[ERROR] in %s : cannot create GPT\n", __FUNCTION__ );755 return -1;756 }757 769 758 770 // build extended pointer on parent VSL root and lock … … 820 832 { 821 833 // activate the COW for DATA, ANON, REMOTE vsegs only 822 cow = ( type != VSEG_TYPE_FILE );834 // cow = ( type != VSEG_TYPE_FILE ); 823 835 824 836 vpn_base = child_vseg->vpn_base; … … 832 844 XPTR( parent_cxy , &parent_vmm->gpt ), 833 845 vpn, 834 cow,835 &ppn, 836 &mapped ); 846 false, // does not handle COW flag 847 &ppn, // unused 848 &mapped ); // unused 837 849 if( error ) 838 850 { … … 842 854 } 843 855 844 // increment pending forks counter in page if mapped845 if( mapped )846 {847 // get pointers and cluster on page descriptor848 page_xp = ppm_ppn2page( ppn );849 page_cxy = GET_CXY( page_xp );850 page_ptr = GET_PTR( page_xp );851 852 // get extended pointers on "forks" and "lock"853 forks_xp = XPTR( page_cxy , &page_ptr->forks );854 lock_xp = XPTR( page_cxy , &page_ptr->lock );855 856 // get lock protecting "forks" counter857 remote_busylock_acquire( lock_xp );858 859 // increment "forks"860 hal_remote_atomic_add( forks_xp , 1 );861 862 // release lock protecting "forks" counter863 remote_busylock_release( lock_xp );864 865 856 #if DEBUG_VMM_FORK_COPY 866 857 cycle = (uint32_t)hal_get_cycles(); … … 869 860 __FUNCTION__ , this->process->pid, this->trdid , vpn , cycle ); 870 861 #endif 871 }872 862 } 873 863 } // end if no code & no stack … … 877 867 // release the parent VSL lock in read mode 878 868 remote_rwlock_rd_release( parent_lock_xp ); 879 880 // update child VMM with kernel vsegs881 error = hal_vmm_kernel_update( child_process );882 883 if( error )884 {885 printk("\n[ERROR] in %s : cannot update child VMM\n", __FUNCTION__ );886 return -1;887 }888 869 889 870 // initialize the child VMM STACK allocator … … 902 883 903 884 // initialize instrumentation counters 904 child_vmm->pgfault_nr = 0; 885 child_vmm->false_pgfault_nr = 0; 886 child_vmm->local_pgfault_nr = 0; 887 child_vmm->global_pgfault_nr = 0; 888 child_vmm->false_pgfault_cost = 0; 889 child_vmm->local_pgfault_cost = 0; 890 child_vmm->global_pgfault_cost = 0; 905 891 906 892 // copy base addresses from parent VMM to child VMM … … 933 919 934 920 #if DEBUG_VMM_DESTROY 935 uint32_t cycle = (uint32_t)hal_get_cycles();936 thread_t * this = 
CURRENT_THREAD;921 uint32_t cycle = (uint32_t)hal_get_cycles(); 922 thread_t * this = CURRENT_THREAD; 937 923 if( DEBUG_VMM_DESTROY < cycle ) 938 924 printk("\n[%s] thread[%x,%x] enter for process %x in cluster %x / cycle %d\n", … … 942 928 #if (DEBUG_VMM_DESTROY & 1 ) 943 929 if( DEBUG_VMM_DESTROY < cycle ) 944 hal_vmm_display( process, true );930 hal_vmm_display( XPTR( local_cxy, process ) , true ); 945 931 #endif 946 932 … … 1062 1048 vseg_t * vmm_create_vseg( process_t * process, 1063 1049 vseg_type_t type, 1064 intptr_t base, 1050 intptr_t base, // ltid for VSEG_TYPE_STACK 1065 1051 uint32_t size, 1066 1052 uint32_t file_offset, … … 1074 1060 error_t error; 1075 1061 1076 #if DEBUG_VMM_CREATE_VSEG1062 #if (DEBUG_VMM_CREATE_VSEG & 1) 1077 1063 thread_t * this = CURRENT_THREAD; 1078 1064 uint32_t cycle = (uint32_t)hal_get_cycles(); 1079 1065 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1080 printk("\n[%s] thread[%x,%x] enter for process %x / %s / cxy %x / cycle %d\n", 1081 __FUNCTION__, this->process->pid, this->trdid, process->pid, vseg_type_str(type), cxy, cycle ); 1066 printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n", 1067 __FUNCTION__, this->process->pid, this->trdid, 1068 process->pid, vseg_type_str(type), base, cxy, cycle ); 1082 1069 #endif 1083 1070 … … 1171 1158 } 1172 1159 1173 #if DEBUG_VMM_CREATE_VSEG1160 #if (DEBUG_VMM_CREATE_VSEG & 1) 1174 1161 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1175 1162 printk("\n[%s] thread[%x,%x] : base %x / size %x / vpn_base %x / vpn_size %x\n", … … 1204 1191 cycle = (uint32_t)hal_get_cycles(); 1205 1192 if( DEBUG_VMM_CREATE_VSEG < cycle ) 1206 printk("\n[%s] thread[%x,%x] exit / %s / cxy %x / cycle %d\n", 1207 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(type), cxy, cycle ); 1193 printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n", 1194 __FUNCTION__, this->process->pid, this->trdid, 1195 process->pid, vseg_type_str(type), base, cxy, cycle ); 1208 1196 #endif 1209 1197 … … 1685 1673 xptr_t page_xp; 1686 1674 cxy_t page_cxy; 1675 page_t * page_ptr; 1687 1676 uint32_t index; 1688 1677 … … 1711 1700 } 1712 1701 1713 // allocate a 4 Kbytes physical page from target cluster 1714 page_xp = ppm_remote_alloc_pages( page_cxy , 0 ); 1702 // allocate one small physical page from target cluster 1703 page_ptr = ppm_remote_alloc_pages( page_cxy , 0 ); 1704 1705 page_xp = XPTR( page_cxy , page_ptr ); 1715 1706 1716 1707 #if DEBUG_VMM_PAGE_ALLOCATE 1717 1708 cycle = (uint32_t)hal_get_cycles(); 1718 1709 if( DEBUG_VMM_PAGE_ALLOCATE < cycle ) 1719 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / c luster %x / cycle %d\n",1720 __FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), page_cxy,cycle );1710 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 1711 __FUNCTION__ , this->process->pid, this->trdid, vpn, ppm_page2ppn(page_xp), cycle ); 1721 1712 #endif 1722 1713 … … 1741 1732 uint32_t cycle = (uint32_t)hal_get_cycles(); 1742 1733 thread_t * this = CURRENT_THREAD; 1743 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1744 if( vpn == 0x40B ) 1734 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1745 1735 printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id %d / cycle %d\n", 1746 1736 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); … … 1769 1759 page_xp = vmm_page_allocate( vseg , vpn ); 1770 1760 1771 if( page_xp == XPTR_NULL ) return ENOMEM;1761 if( page_xp == XPTR_NULL ) return -1; 1772 
1762 1773 1763 // initialise missing page from .elf file mapper for DATA and CODE types … … 1788 1778 1789 1779 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1790 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1791 if( vpn == 0x40B ) 1780 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1792 1781 printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", 1793 1782 __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); … … 1803 1792 1804 1793 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1805 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1806 if( vpn == 0x40B ) 1794 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1807 1795 printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", 1808 1796 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 1821 1809 1822 1810 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1823 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1824 if( vpn == 0x40B ) 1811 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1825 1812 printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", 1826 1813 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 1839 1826 1840 1827 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 1841 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1842 if( vpn == 0x40B ) 1828 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1843 1829 printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" 1844 1830 " %d bytes from mapper / %d bytes from BSS\n", … … 1874 1860 #if DEBUG_VMM_GET_ONE_PPN 1875 1861 cycle = (uint32_t)hal_get_cycles(); 1876 // if( DEBUG_VMM_GET_ONE_PPN < cycle ) 1877 if( vpn == 0x40B ) 1878 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle\n", 1862 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 1863 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 1879 1864 __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); 1880 1865 #endif … … 1906 1891 1907 1892 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1908 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1893 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1909 1894 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", 1910 1895 __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle ); … … 1912 1897 1913 1898 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1914 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1915 hal_vmm_display( this->process , false );1899 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1900 hal_vmm_display( this->process , true ); 1916 1901 #endif 1917 1902 … … 1928 1913 } 1929 1914 1930 #if DEBUG_VMM_HANDLE_PAGE_FAULT1931 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1915 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1916 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1932 1917 printk("\n[%s] thread[%x,%x] found vseg %s\n", 1933 1918 __FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) ); … … 1950 1935 } 1951 1936 1952 #if DEBUG_VMM_HANDLE_PAGE_FAULT1953 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1954 printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x / cycle %d\n",1937 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1938 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1939 printk("\n[%s] thread[%x,%x] locked vpn %x in cluster %x\n", 1955 1940 __FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy ); 1956 1941 #endif … … 1970 1955 { 1971 1956 1972 #if DEBUG_VMM_HANDLE_PAGE_FAULT 1973 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) ) 1974 printk("\n[%s] thread[%x,%x] access local gpt : 
cxy %x / ref_cxy %x / type %s\n", 1975 __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) ); 1957 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 1958 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 1959 printk("\n[%s] thread[%x,%x] access local gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n", 1960 __FUNCTION__, this->process->pid, this->trdid, 1961 local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() ); 1976 1962 #endif 1977 1963 // allocate and initialise a physical page … … 2008 1994 2009 1995 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2010 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )1996 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2011 1997 printk("\n[%s] thread[%x,%x] handled local pgfault / ppn %x / attr %x / cycle %d\n", 2012 1998 __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); … … 2026 2012 { 2027 2013 2028 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2029 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) ) 2030 printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s\n", 2031 __FUNCTION__, this->process->pid, this->trdid, local_cxy, ref_cxy, vseg_type_str(vseg->type) ); 2014 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2015 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2016 printk("\n[%s] thread[%x,%x] access ref gpt : cxy %x / ref_cxy %x / type %s / cycle %d\n", 2017 __FUNCTION__, this->process->pid, this->trdid, 2018 local_cxy, ref_cxy, vseg_type_str(vseg->type), (uint32_t)hal_get_cycles() ); 2032 2019 #endif 2033 2020 // build extended pointer on reference GPT … … 2050 2037 } 2051 2038 2052 #if DEBUG_VMM_HANDLE_PAGE_FAULT2053 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2039 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2040 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2054 2041 printk("\n[%s] thread[%x,%x] get pte from ref gpt / attr %x / ppn %x\n", 2055 2042 __FUNCTION__, this->process->pid, this->trdid, ref_attr, ref_ppn ); … … 2065 2052 ref_ppn ); 2066 2053 2067 #if DEBUG_VMM_HANDLE_PAGE_FAULT2068 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2054 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2055 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2069 2056 printk("\n[%s] thread[%x,%x] updated local gpt for a false pgfault\n", 2070 2057 __FUNCTION__, this->process->pid, this->trdid ); … … 2074 2061 hal_gpt_unlock_pte( ref_gpt_xp, vpn ); 2075 2062 2076 #if DEBUG_VMM_HANDLE_PAGE_FAULT2077 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2063 #if (DEBUG_VMM_HANDLE_PAGE_FAULT &1) 2064 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2078 2065 printk("\n[%s] thread[%x,%x] unlock the ref gpt after a false pgfault\n", 2079 2066 __FUNCTION__, this->process->pid, this->trdid ); … … 2085 2072 2086 2073 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2087 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2074 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2088 2075 printk("\n[%s] thread[%x,%x] handled false pgfault / ppn %x / attr %x / cycle %d\n", 2089 2076 __FUNCTION__, this->process->pid, this->trdid, ref_ppn, ref_attr, end_cycle ); … … 2120 2107 if( vseg->flags & VSEG_CACHE ) attr |= GPT_CACHABLE; 2121 2108 2122 #if DEBUG_VMM_HANDLE_PAGE_FAULT2123 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2109 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2110 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2124 2111 
printk("\n[%s] thread[%x,%x] build a new PTE for a true pgfault\n", 2125 2112 __FUNCTION__, this->process->pid, this->trdid ); … … 2132 2119 ppn ); 2133 2120 2134 #if DEBUG_VMM_HANDLE_PAGE_FAULT2135 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2121 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1) 2122 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2136 2123 printk("\n[%s] thread[%x,%x] set new PTE in ref gpt for a true page fault\n", 2137 2124 __FUNCTION__, this->process->pid, this->trdid ); … … 2150 2137 2151 2138 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2152 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2139 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2153 2140 printk("\n[%s] thread[%x,%x] handled global pgfault / ppn %x / attr %x / cycle %d\n", 2154 2141 __FUNCTION__, this->process->pid, this->trdid, ppn, attr, end_cycle ); … … 2173 2160 2174 2161 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2175 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn == vpn) )2162 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2176 2163 printk("\n[%s] handled by another thread / vpn %x / ppn %x / attr %x / cycle %d\n", 2177 2164 __FUNCTION__, vpn, ppn, attr, end_cycle ); … … 2212 2199 #endif 2213 2200 2214 #if ( DEBUG_VMM_HANDLE_PAGE_FAULT & 1)2201 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3 ) 2215 2202 hal_vmm_display( process , true ); 2216 2203 #endif … … 2352 2339 #if(DEBUG_VMM_HANDLE_COW & 1) 2353 2340 if( DEBUG_VMM_HANDLE_COW < cycle ) 2354 printk("\n[%s] thread[%x,%x] 2341 printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n", 2355 2342 __FUNCTION__, this->process->pid, this->trdid, old_ppn ); 2356 2343 #endif … … 2360 2347 // build new_attr : set WRITABLE, reset COW, reset LOCKED 2361 2348 new_attr = (((old_attr | GPT_WRITABLE) & (~GPT_COW)) & (~GPT_LOCKED)); 2349 2350 #if(DEBUG_VMM_HANDLE_COW & 1) 2351 if( DEBUG_VMM_HANDLE_COW < cycle ) 2352 printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n", 2353 __FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn ); 2354 #endif 2362 2355 2363 2356 // update the relevant GPT(s) … … 2366 2359 if( (vseg->type == VSEG_TYPE_STACK) || (vseg->type == VSEG_TYPE_CODE) ) 2367 2360 { 2368 // set the new PTE22361 // set new PTE in local gpt 2369 2362 hal_gpt_set_pte( gpt_xp, 2370 2363 vpn, … … 2398 2391 #endif 2399 2392 2393 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3) 2394 hal_vmm_display( process , true ); 2395 #endif 2396 2400 2397 return EXCP_NON_FATAL; 2401 2398 -
trunk/kernel/mm/vmm.h
r632 r635 52 52 * - The allocator checks that the requested slot has not been already allocated, and set the 53 53 * corresponding bit in the bitmap. 54 * - The de-allocator functionreset the corresponding bit in the bitmap.54 * - The de-allocator reset the corresponding bit in the bitmap. 55 55 ********************************************************************************************/ 56 56 … … 112 112 typedef struct vmm_s 113 113 { 114 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL */ 115 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference) */ 116 uint32_t vsegs_nr; /*! total number of local vsegs */ 117 118 gpt_t gpt; /*! Generic Page Table (complete in reference) */ 119 120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 122 123 uint32_t pgfault_nr; /*! page fault counter (instrumentation) */ 124 125 vpn_t args_vpn_base; /*! args vseg first page */ 126 vpn_t envs_vpn_base; /*! envs vseg first page */ 127 vpn_t code_vpn_base; /*! code vseg first page */ 128 vpn_t data_vpn_base; /*! data vseg first page */ 129 vpn_t heap_vpn_base; /*! heap zone first page */ 130 131 intptr_t entry_point; /*! main thread entry point */ 114 remote_rwlock_t vsl_lock; /*! lock protecting the local VSL */ 115 xlist_entry_t vsegs_root; /*! Virtual Segment List (complete in reference) */ 116 uint32_t vsegs_nr; /*! total number of local vsegs */ 117 118 gpt_t gpt; /*! Generic Page Table (complete in reference) */ 119 120 stack_mgr_t stack_mgr; /*! embedded STACK vsegs allocator */ 121 mmap_mgr_t mmap_mgr; /*! embedded MMAP vsegs allocator */ 122 123 uint32_t false_pgfault_nr; /*! false page fault counter (for all threads) */ 124 uint32_t local_pgfault_nr; /*! false page fault counter (for all threads) */ 125 uint32_t global_pgfault_nr; /*! false page fault counter (for all threads) */ 126 uint32_t false_pgfault_cost; /*! cumulated cost (for all threads) */ 127 uint32_t local_pgfault_cost; /*! cumulated cost (for all threads) */ 128 uint32_t global_pgfault_cost; /*! cumulated cost (for all threads) */ 129 130 vpn_t args_vpn_base; /*! args vseg first page */ 131 vpn_t envs_vpn_base; /*! envs vseg first page */ 132 vpn_t code_vpn_base; /*! code vseg first page */ 133 vpn_t data_vpn_base; /*! data vseg first page */ 134 vpn_t heap_vpn_base; /*! heap zone first page */ 135 136 intptr_t entry_point; /*! main thread entry point */ 132 137 } 133 138 vmm_t; 134 139 135 140 /********************************************************************************************* 136 * This function mkkes a partial initialisation of the VMM attached to an user process. 137 * The GPT must have been previously created, with the hal_gpt_create() function. 138 * - It registers "args", "envs" vsegs in the VSL. 139 * - It initializes the STACK and MMAP allocators. 140 * Note: 141 * This function makes only a partial initialisation of the VMM attached to an user 142 * process: It intializes the STACK and MMAP allocators, and the VSL lock. 143 * - The GPT has been previously created, with the hal_gpt_create() function. 144 * - The "kernel" vsegs are previously registered, by the hal_vmm_kernel_update() function. 141 145 * - The "code" and "data" vsegs are registered by the elf_load_process() function. 142 146 * - The "stack" vsegs are dynamically registered by the thread_user_create() function. … … 165 169 * This function is called by the process_make_fork() function. 
It partially copies 166 170 * the content of a remote parent process VMM to the local child process VMM: 171 * - The KERNEL vsegs required by the architecture must have been previously 172 * created in the child VMM, using the hal_vmm_kernel_update() function. 167 173 * - The DATA, ANON, REMOTE vsegs registered in the parent VSL are registered in the 168 * child VSL. All valid PTEs in parent GPT are copied to the child GPT, but the 169 * WRITABLE flag is reset and the COW flag is set. 174 * child VSL. All valid PTEs in parent GPT are copied to the child GPT. 175 * The WRITABLE and COW flags are not modified, as it will be done later for those 176 * shared pages by the vmm_set_cow() function. 170 177 * - The CODE vsegs registered in the parent VSL are registered in the child VSL, but the 171 178 * GPT entries are not copied in the child GPT, and will be dynamically updated from … … 173 180 * - The FILE vsegs registered in the parent VSL are registered in the child VSL, and all 174 181 * valid GPT entries in parent GPT are copied to the child GPT. The COW flag is not set. 175 * - No STACK vseg is copied from parent VMM to child VMM, because the child stack vseg 176 * must be copied later from the cluster containing the user thread requesting the fork(). 177 * - The KERNEL vsegs required by the target architecture are re-created in the child 178 * VMM, from the local kernel process VMM, using the hal_vmm_kernel_update() function. 182 * - No STACK vseg is copied from parent VMM to child VMM: the child stack vseg is copied 183 * later from the cluster containing the user thread requesting the fork(). 179 184 ********************************************************************************************* 180 185 * @ child_process : local pointer on local child process descriptor. … … 203 208 * This function modifies one GPT entry identified by the <process> and <vpn> arguments 204 209 * in all clusters containing a process copy. It is used to maintain coherence in GPT 205 * copies, using the list of copies stored in the owner process, andremote_write accesses.210 * copies, using remote_write accesses. 206 211 * It must be called by a thread running in the process owner cluster. 207 212 * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required. … … 248 253 * - For the FILE, ANON, & REMOTE types, it does not use the <base> and <size> arguments, 249 254 * but uses the specific MMAP virtual memory allocator. 250 * - For the STACK type, it does not use the < size> argument, and the <base> argument251 * defines the user thread LTID used bythe specific STACK virtual memory allocator.252 * It checks collision with allpre-existing vsegs.255 * - For the STACK type, it does not use the <base> and <size> arguments, but uses the 256 * and the <base> argument the specific STACK virtual memory allocator. 257 * It checks collision with pre-existing vsegs. 253 258 * To comply with the "on-demand" paging policy, this function does NOT modify the GPT, 254 259 * and does not allocate physical memory for vseg data. -
trunk/kernel/mm/vseg.c
r625 r635 2 2 * vseg.c - virtual segment (vseg) related operations 3 3 * 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) 5 * Mohamed Lamine Karaoui (2015) 6 * Alain Greiner (2016,2017,2018,2019) 4 * Authors Alain Greiner (2016,2017,2018,2019) 7 5 * 8 6 * Copyright (c) UPMC Sorbonne Universites … … 66 64 kmem_req_t req; 67 65 68 req.type = KMEM_ VSEG;69 req. size = sizeof(vseg_t);70 req.flags = AF_KERNEL ;71 72 return (vseg_t *)kmem_alloc( &req );66 req.type = KMEM_KCM; 67 req.order = bits_log2( sizeof(vseg_t) ); 68 req.flags = AF_KERNEL | AF_ZERO; 69 70 return kmem_alloc( &req ); 73 71 } 74 72 … … 78 76 kmem_req_t req; 79 77 80 req.type = KMEM_ VSEG;78 req.type = KMEM_KCM; 81 79 req.ptr = vseg; 82 80 kmem_free( &req ); -
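The same allocation pattern replaces the former per-type KMEM_* requests throughout this changeset (devfs_ctx, fatfs_ctx, vseg): small descriptors now come from the KCM allocator, with the request order derived from the object size. Generic form of the allocate / free pair, mirroring vseg_alloc() and vseg_free() above; my_type_t stands for any small descriptor type:

    kmem_req_t req;

    // allocate a small, zeroed kernel object from the KCM allocator
    req.type  = KMEM_KCM;
    req.order = bits_log2( sizeof(my_type_t) );
    req.flags = AF_KERNEL | AF_ZERO;
    my_type_t * ptr = kmem_alloc( &req );

    // release it later
    req.type = KMEM_KCM;
    req.ptr  = ptr;
    kmem_free( &req );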
trunk/kernel/syscalls/sys_barrier.c
r629 r635 76 76 printk("\n[ERROR] in %s : unmapped barrier %x / thread %x / process %x\n", 77 77 __FUNCTION__ , vaddr , this->trdid , process->pid ); 78 hal_vmm_display( process , false );79 78 #endif 80 79 this->errno = error; … … 97 96 printk("\n[ERROR] in %s : unmapped barrier attributes %x / thread %x / process %x\n", 98 97 __FUNCTION__ , attr , this->trdid , process->pid ); 99 hal_vmm_display( process , false );100 98 #endif 101 99 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_condvar.c
r624 r635 76 76 printk("\n[ERROR] in %s : unmapped condvar %x / thread %x / process %x\n", 77 77 __FUNCTION__ , (intptr_t)condvar , this->trdid , process->pid ); 78 hal_vmm_display( process , false );79 78 #endif 80 79 this->errno = error; -
trunk/kernel/syscalls/sys_display.c
r626 r635 160 160 } 161 161 162 // get local pointer on process163 process_t * process = (process_t *)GET_PTR( process_xp );164 165 162 // call kernel function 166 if( cxy == local_cxy ) 167 { 168 hal_vmm_display( process , true ); 169 } 170 else 171 { 172 rpc_hal_vmm_display_client( cxy , process , true ); 173 } 163 hal_vmm_display( process_xp , true ); 174 164 175 165 break; -
trunk/kernel/syscalls/sys_exec.c
r626 r635 63 63 uint32_t length; // string length 64 64 kmem_req_t req; // kmem request 65 page_t * page; // page descriptor66 xptr_t base_xp; // extended pointer on page base67 65 uint32_t order; // ln2( number of pages to store strings ) 68 66 char ** k_pointers; // base of kernel array of pointers 67 char * k_buf_base; // base address of the kernel strings buffer 69 68 char * k_buf_ptr; // pointer on first empty slot in kernel strings buffer 70 char * k_buf_base; // base address of the kernel strings buffer71 69 72 70 // compute ln2( number of pages for kernel strings buffer ) … … 74 72 else order = bits_log2( CONFIG_VMM_ENVS_SIZE ); 75 73 76 req.type = KMEM_PAGE; 74 // allocate one physical page for kernel array of pointers 75 req.type = KMEM_PPM; 76 req.order = 0; 77 77 req.flags = AF_KERNEL | AF_ZERO; 78 79 // allocate one physical page for kernel array of pointers 80 req.type = 0; 81 page = kmem_alloc( &req ); 82 83 if( page == NULL ) return ENOMEM; 84 85 base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 86 k_pointers = (char **)GET_PTR( base_xp ); 78 k_pointers = kmem_alloc( &req ); 79 80 if( k_pointers == NULL ) return ENOMEM; 87 81 88 82 // allocate several physical pages to store the strings themselve 89 req.type = order; 90 page = kmem_alloc( &req ); 91 92 if( page == NULL ) return ENOMEM; 93 94 base_xp = ppm_page2base( XPTR( local_cxy , page ) ); 95 k_buf_base = (char *)GET_PTR( base_xp ); 83 req.type = KMEM_PPM; 84 req.order = order; 85 req.flags = AF_KERNEL | AF_ZERO; 86 k_buf_base = kmem_alloc( &req ); 87 88 if( k_buf_base == NULL ) return ENOMEM; 96 89 97 90 // copy the array of pointers to kernel buffer -
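For buffers spanning one or several physical pages, the changeset uses the KMEM_PPM request type instead: the order gives the number of pages as a power of two, and kmem_alloc() now returns the buffer base address directly, removing the old page-descriptor / ppm_page2base() indirection. The pattern used above for the argument strings buffer:

    kmem_req_t req;

    req.type  = KMEM_PPM;                      // physical pages from the PPM
    req.order = order;                         // ln2( number of 4 Kbytes pages )
    req.flags = AF_KERNEL | AF_ZERO;
    char * k_buf_base = kmem_alloc( &req );    // base address of the allocated buffer

    if( k_buf_base == NULL ) return ENOMEM;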
trunk/kernel/syscalls/sys_fork.c
r625 r635 72 72 73 73 #if DEBUG_SYS_FORK 74 if( DEBUG_SYS_FORK < tm_start )74 if( DEBUG_SYS_FORK < (uint32_t)tm_start ) 75 75 printk("\n[%s] thread[%x,%x] enter / cycle = %d\n", 76 76 __FUNCTION__, parent_pid, parent_thread_ptr->trdid, (uint32_t)tm_start ); … … 109 109 110 110 #if (DEBUG_SYS_FORK & 1 ) 111 if( DEBUG_SYS_FORK < tm_start )111 if( DEBUG_SYS_FORK < (uint32_t)tm_start ) 112 112 printk("\n[%s] thread[%x,%x] selected cluster %x\n", 113 113 __FUNCTION__, parent_pid, parent_thread_ptr->trdid, child_cxy ); … … 150 150 } 151 151 152 // set remote child CPU context from parent_thread register values 152 // set the remote child CPU context from parent register values, 153 // set the remote child uzone from 153 154 // replicates the parent thread kernel stack to the child thread descriptor, 154 155 // and finally unblock the child thread. … … 171 172 172 173 #if DEBUG_SYS_FORK 173 if( DEBUG_SYS_FORK < tm_end )174 if( DEBUG_SYS_FORK < (uint32_t)tm_end ) 174 175 printk("\n[%s] parent thread[%x,%x] exit / child_pid %x / cycle %d\n", 175 176 __FUNCTION__, current->process->pid, current->trdid, child_pid, (uint32_t)tm_end ); 176 177 #endif 177 178 178 // only parent contribute to instrumentation 179 // only parent display the parent and child VMM 180 #if (DEBUG_SYS_FORK & 1 ) 181 if( DEBUG_SYS_FORK < (uint32_t)tm_end ) 182 { 183 process_t * child_process_ptr = hal_remote_lpt( XPTR( child_cxy , 184 &child_thread_ptr->process ) ); 185 xptr_t child_process_xp = XPTR( child_cxy , child_process_ptr ); 186 187 hal_vmm_display( ref_process_xp , true ); 188 hal_vmm_display( child_process_xp , true ); 189 } 190 #endif 191 192 // only parent contribute to syscalls instrumentation 179 193 #if CONFIG_INSTRUMENTATION_SYSCALLS 180 194 hal_atomic_add( &syscalls_cumul_cost[SYS_FORK] , tm_end - tm_start ); … … 187 201 188 202 #if DEBUG_SYS_FORK 189 if( DEBUG_SYS_FORK < tm_end )203 if( DEBUG_SYS_FORK < (uint32_t)tm_end ) 190 204 printk("\n[%s] child thread[%x,%x] exit / child_pid %x / cycle %d\n", 191 205 __FUNCTION__, current->process->pid, current->trdid, child_pid, (uint32_t)tm_end ); -
trunk/kernel/syscalls/sys_get_config.c
r626 r635 69 69 printk("\n[ERROR] in %s : x_size buffer unmapped / thread %x / process %x\n", 70 70 __FUNCTION__ , (intptr_t)x_size , this->trdid , process->pid ); 71 hal_vmm_display( process , false );72 71 #endif 73 72 this->errno = EINVAL; … … 84 83 printk("\n[ERROR] in %s : y_size buffer unmapped / thread %x / process %x\n", 85 84 __FUNCTION__ , (intptr_t)y_size , this->trdid , process->pid ); 86 hal_vmm_display( process , false );87 85 #endif 88 86 this->errno = EINVAL; … … 99 97 printk("\n[ERROR] in %s : ncores buffer unmapped / thread %x / process %x\n", 100 98 __FUNCTION__ , (intptr_t)ncores , this->trdid , process->pid ); 101 hal_vmm_display( process , false );102 99 #endif 103 100 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_get_core.c
r626 r635 56 56 printk("\n[ERROR] in %s : cxy buffer unmapped %x / thread %x / process %x\n", 57 57 __FUNCTION__ , (intptr_t)cxy , this->trdid , process->pid ); 58 hal_vmm_display( process , false );59 58 #endif 60 59 this->errno = EFAULT; … … 71 70 printk("\n[ERROR] in %s : lid buffer unmapped %x / thread %x / process %x\n", 72 71 __FUNCTION__ , (intptr_t)lid , this->trdid , process->pid ); 73 hal_vmm_display( process , false );74 72 #endif 75 73 this->errno = EFAULT; -
trunk/kernel/syscalls/sys_get_cycle.c
r626 r635 54 54 printk("\n[ERROR] in %s : user buffer unmapped %x / thread %x / process %x\n", 55 55 __FUNCTION__ , (intptr_t)cycle , this->trdid , process->pid ); 56 hal_vmm_display( process , false );57 56 #endif 58 57 this->errno = EFAULT; -
trunk/kernel/syscalls/sys_is_fg.c
r626 r635 68 68 printk("\n[ERROR] in %s : unmapped owner buffer %x / thread %x in process %x\n", 69 69 __FUNCTION__ , (intptr_t)is_fg, this->trdid, process->pid ); 70 hal_vmm_display( process , false );71 70 #endif 72 71 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_mmap.c
r626 r635 70 70 printk("\n[ERROR] in %s : thread[%x,%x] / mmap attributes unmapped %x\n", 71 71 __FUNCTION__ , process->pid, this->trdid, (intptr_t)attr ); 72 hal_vmm_display( process , false );73 72 #endif 74 73 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_munmap.c
r625 r635 67 67 printk("\n[ERROR] in %s : thread[%x,%x] / user buffer unmapped %x\n", 68 68 __FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr ); 69 hal_vmm_display( process , false );70 69 #endif 71 70 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_mutex.c
r625 r635 76 76 printk("\n[ERROR] in %s : mutex unmapped %x / thread %x / process %x\n", 77 77 __FUNCTION__ , (intptr_t)vaddr , this->trdid , process->pid ); 78 hal_vmm_display( process , false );79 78 #endif 80 79 this->errno = error; -
trunk/kernel/syscalls/sys_opendir.c
r626 r635 67 67 printk("\n[ERROR] in %s / thread[%x,%x] : DIR buffer %x unmapped\n", 68 68 __FUNCTION__ , process->pid , this->trdid, dirp ); 69 hal_vmm_display( process , false );70 69 #endif 71 70 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_read.c
r633 r635 106 106 printk("\n[ERROR] in %s : thread[%x,%x] user buffer unmapped %x\n", 107 107 __FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr ); 108 hal_vmm_display( process , false );109 108 #endif 110 109 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_readdir.c
r626 r635 70 70 printk("\n[ERROR] in %s / thread[%x,%x] : user buffer %x unmapped\n", 71 71 __FUNCTION__ , process->pid , this->trdid, buffer ); 72 hal_vmm_display( process , false );73 72 #endif 74 73 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_sem.c
r626 r635 75 75 printk("\n[ERROR] in %s : unmapped semaphore pointer %x / thread %x in process %x / cycle %d\n", 76 76 __FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid, (uint32_t)hal_get_cycles() ); 77 hal_vmm_display( process , false );78 77 #endif 79 78 this->errno = EINVAL; … … 113 112 printk("\n[ERROR] in %s GETVALUE: unmapped buffer %x / thread %x in process %x / cycle %d\n", 114 113 __FUNCTION__ , (intptr_t)current_value, this->trdid, process->pid, (uint32_t)hal_get_cycles() ); 115 hal_vmm_display( process , false );116 114 #endif 117 115 this->errno = EINVAL; … … 158 156 printk("\n[ERROR] in %s WAIT: semaphore %x not found / thread %x in process %x / cycle %d\n", 159 157 __FUNCTION__ , (intptr_t)vaddr, this->trdid, process->pid, (uint32_t)hal_get_cycles() ); 160 hal_vmm_display( process , true );161 158 #endif 162 159 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_stat.c
r626 r635 62 62 printk("\n[ERROR] in %s / thread[%x,%x] : stat structure %x unmapped\n", 63 63 __FUNCTION__ , process->pid , this->trdid, u_stat ); 64 hal_vmm_display( process , false );65 64 #endif 66 65 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_thread_create.c
r633 r635 81 81 printk("\n[ERROR] in %s : thread[%x,%x] / trdid buffer %x unmapped %x\n", 82 82 __FUNCTION__, process->pid, parent->trdid, (intptr_t)trdid_ptr ); 83 hal_vmm_display( process , false );84 83 #endif 85 84 parent->errno = EINVAL; … … 98 97 printk("\n[ERROR] in %s : thread[%x,%x] / user_attr buffer unmapped %x\n", 99 98 __FUNCTION__, process->pid, parent->trdid, (intptr_t)user_attr ); 100 hal_vmm_display( process , false );101 99 #endif 102 100 parent->errno = EINVAL; … … 119 117 printk("\n[ERROR] in %s : thread[%x,%x] / start_func unmapped %x\n", 120 118 __FUNCTION__, process->pid, parent->trdid, (intptr_t)start_func ); 121 hal_vmm_display( process , false );122 119 #endif 123 120 parent->errno = EINVAL; … … 136 133 printk("\n[ERROR] in %s : thread[%x,%x] / start_args buffer unmapped %x\n", 137 134 __FUNCTION__, process->pid, parent->trdid, (intptr_t)start_args ); 138 hal_vmm_display( process , false );139 135 #endif 140 136 parent->errno = EINVAL; -
trunk/kernel/syscalls/sys_thread_exit.c
r625 r635 2 2 * sys_thread_exit.c - terminates the execution of calling thread 3 3 * 4 * Authors Alain Greiner (2016,2017,2018 )4 * Authors Alain Greiner (2016,2017,2018,2019) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 46 46 47 47 #if DEBUG_SYSCALLS_ERROR 48 printk("\n[ERROR] in %s : exit_value argument must be NULL / thread %x in process %x\n",49 __FUNCTION__ , this , pid);48 printk("\n[ERROR] in %s : thread[%x,%x] / exit_value argument %x must be NULL\n", 49 __FUNCTION__ , pid, trdid , exit_value ); 50 50 #endif 51 51 this->errno = EINVAL; 52 52 return -1; 53 53 } 54 55 54 56 55 // If calling thread is the main thread, the process must be deleted. -
trunk/kernel/syscalls/sys_timeofday.c
r626 r635 71 71 printk("\n[ERROR] in %s : user buffer tz unmapped / thread %x / process %x\n", 72 72 __FUNCTION__ , (intptr_t)tz , this->trdid , process->pid ); 73 hal_vmm_display( process , false );74 73 #endif 75 74 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_wait.c
r626 r635 69 69 printk("\n[ERROR] in %s : status buffer %x unmapped for thread[%x,%x]\n", 70 70 __FUNCTION__ , (intptr_t)status, pid, this->trdid ); 71 hal_vmm_display( process , false );72 71 #endif 73 72 this->errno = EINVAL; -
trunk/kernel/syscalls/sys_write.c
r625 r635 106 106 printk("\n[ERROR] in %s : thread[%x,%x] user buffer unmapped %x\n", 107 107 __FUNCTION__ , process->pid, this->trdid, (intptr_t)vaddr ); 108 hal_vmm_display( process , false );109 108 #endif 110 109 this->errno = EINVAL;