Changeset 611 for trunk/kernel
- Timestamp:
- Jan 9, 2019, 3:02:51 PM (6 years ago)
- Location:
- trunk/kernel
- Files:
-
- 35 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/Makefile
r610 r611 122 122 build/libk/remote_fifo.o \ 123 123 build/libk/remote_mutex.o \ 124 build/libk/remote_dir.o \ 124 125 build/libk/remote_sem.o \ 125 126 build/libk/remote_condvar.o \ -
trunk/kernel/fs/fatfs.c
r610 r611 2011 2011 2012 2012 // get pointer on local FATFS context 2013 fatfs_ctx_t * fatfs_ctx 2013 fatfs_ctx_t * fatfs_ctx = fs_context[FS_TYPE_FATFS].extend; 2014 2014 2015 2015 // get page base address … … 2034 2034 #if (DEBUG_FATFS_MOVE_PAGE & 0x1) 2035 2035 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2036 { 2037 uint32_t * tab = (uint32_t *)buffer; 2038 uint32_t line , word; 2039 printk("\n***** %s : First 64 words of page %d in FAT mapper\n", 2040 __FUNCTION__ , page_id ); 2041 for( line = 0 ; line < 8 ; line++ ) 2042 { 2043 printk("%X : ", line ); 2044 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] ); 2045 printk("\n"); 2046 } 2047 } 2036 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , "FAT" ); 2048 2037 #endif 2049 2038 … … 2103 2092 #if (DEBUG_FATFS_MOVE_PAGE & 0x1) 2104 2093 if( DEBUG_FATFS_MOVE_PAGE < cycle ) 2105 { 2106 uint32_t * tab = (uint32_t *)buffer; 2107 uint32_t line , word; 2108 printk("\n***** %s : First 64 words of page %d in <%s> mapper\n", 2109 __FUNCTION__, page_id, name ); 2110 for( line = 0 ; line < 8 ; line++ ) 2111 { 2112 printk("%X : ", line ); 2113 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] ); 2114 printk("\n"); 2115 } 2116 } 2094 char string[CONFIG_VFS_MAX_NAME_LENGTH]; 2095 vfs_inode_get_name( XPTR(page_cxy , inode_ptr) , string ); 2096 mapper_display_page( XPTR(page_cxy , mapper_ptr) , page_id , string ); 2117 2097 #endif 2118 2098 -
trunk/kernel/fs/fatfs.h
r610 r611 238 238 239 239 240 241 240 ////////////////////////////////////////////////////////////////////////////////////////// 242 241 // Generic API: These functions are called by the kernel VFS, … … 409 408 ***************************************************************************************** 410 409 * This function moves a page from/to the mapper to/from the FATFS file system on device. 411 * The page must have been previously allocated and registered in the mapper , but the412 * page - and the mapper - can be located in another cluster than the calling thread.410 * The page must have been previously allocated and registered in the mapper. 411 * The page - and the mapper - can be located in another cluster than the calling thread. 413 412 * The pointer on the mapper and the page index in file are found in the page descriptor. 414 * It is used both for the regular file/directory mappers, and forthe FAT mapper.413 * It is used for both for a regular file/directory mapper, and the FAT mapper. 415 414 * For the FAT mapper, it access the FATFS to get the location on IOC device. 416 415 * For a regular file, it access the FAT mapper to get the cluster index on IOC device. -
trunk/kernel/fs/vfs.c
r610 r611 23 23 */ 24 24 25 26 25 #include <kernel_config.h> 27 26 #include <hal_kernel_types.h> … … 48 47 #include <syscalls.h> 49 48 50 51 49 ////////////////////////////////////////////////////////////////////////////////////////// 52 50 // Extern variables … … 136 134 case INODE_TYPE_SOCK: return "SOCK"; 137 135 case INODE_TYPE_DEV: return "DEV "; 136 case INODE_TYPE_BLK: return "BLK "; 138 137 case INODE_TYPE_SYML: return "SYML"; 139 138 default: return "undefined"; … … 1009 1008 cxy_t vfs_root_cxy; // VFS root inode cluster identifier 1010 1009 xptr_t lock_xp; // extended pointer on lock protecting Inode Tree 1011 xptr_t inode_xp; // extended pointer on targetinode1012 vfs_inode_t * inode_ptr; // local pointer on targetinode1013 cxy_t inode_cxy; // targetinode cluster identifier1010 xptr_t inode_xp; // extended pointer on new directory inode 1011 vfs_inode_t * inode_ptr; // local pointer on new directory inode 1012 cxy_t inode_cxy; // new directory inode cluster identifier 1014 1013 xptr_t dentry_xp; // extended pointer on new dentry 1015 vfs_dentry_t * dentry_ptr; // targetdentry local pointer1016 xptr_t parent_xp; // extended pointer on newparent inode1017 vfs_inode_t * parent_ptr; // local pointer on newparent inode1018 cxy_t parent_cxy; // newparent inode cluster identifier1019 vfs_ctx_t * parent_ctx_ptr; // local pointer on target inode context1020 uint32_t parent_fs_type; // target inode file system type1014 vfs_dentry_t * dentry_ptr; // new dentry local pointer 1015 xptr_t parent_xp; // extended pointer on parent inode 1016 vfs_inode_t * parent_ptr; // local pointer on parent inode 1017 cxy_t parent_cxy; // parent inode cluster identifier 1018 vfs_ctx_t * parent_ctx_ptr; // local pointer on parent inode context 1019 uint32_t parent_fs_type; // parent inode file system type 1021 1020 1022 1021 xptr_t parents_root_xp; // extended pointer on parents field in inode (root) … … 1109 1108 #endif 1110 1109 1111 // 3. 
create new directory inode in child cluster1110 // 3. create new directory inode 1112 1111 // TODO : define attr / uid / gid 1113 1112 uint32_t attr = 0; … … 1118 1117 inode_cxy = cluster_random_select(); 1119 1118 1120 if( inode_cxy == local_cxy ) // childcluster is local1119 if( inode_cxy == local_cxy ) // target cluster is local 1121 1120 { 1122 1121 error = vfs_inode_create( parent_fs_type, … … 1128 1127 &inode_xp ); 1129 1128 } 1130 else // childcluster is remote1129 else // target cluster is remote 1131 1130 { 1132 1131 rpc_vfs_inode_create_client( inode_cxy, … … 1143 1142 if( error ) 1144 1143 { 1144 remote_rwlock_wr_release( lock_xp ); 1145 1145 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n", 1146 1146 __FUNCTION__ , inode_cxy , path ); 1147 1148 1147 if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr ); 1149 1148 else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr ); … … 1181 1180 #endif 1182 1181 1182 // 7. create the two special dentries <.> and <..> in new directory 1183 // both the new directory mapper, and the Inode Tree are updated 1184 error = vfs_add_special_dentries( inode_xp, 1185 parent_xp ); 1186 1187 if( error ) 1188 { 1189 remote_rwlock_wr_release( lock_xp ); 1190 printk("\n[ERROR] in %s : cannot create new inode in cluster %x for <%s>\n", 1191 __FUNCTION__ , inode_cxy , path ); 1192 if( parent_cxy == local_cxy ) vfs_dentry_destroy( dentry_ptr ); 1193 else rpc_vfs_dentry_destroy_client( parent_cxy , dentry_ptr ); 1194 return -1; 1195 } 1196 1183 1197 // release the lock protecting Inode Tree 1184 1198 remote_rwlock_wr_release( lock_xp ); 1185 1199 1186 // 5. update parent directory mapper1200 // 8. 
update parent directory mapper 1187 1201 // and synchronize the parent directory on IOC device 1188 1202 if (parent_cxy == local_cxy) … … 1625 1639 } // end vfs_unlink() 1626 1640 1627 /////////////////////////////////////////// 1628 error_t vfs_stat( xptr_t root_inode_xp,1629 char * path,1630 st at_t* st )1641 //////////////////////////////////////////////// 1642 error_t vfs_stat( xptr_t root_inode_xp, 1643 char * path, 1644 struct stat * st ) 1631 1645 { 1632 1646 error_t error; … … 1936 1950 inode_inum, inode_size, inode_dirty, inode_cxy, inode_ptr, mapper_ptr ); 1937 1951 1938 // scan directory entries 1939 if( inode_type == INODE_TYPE_DIR ) 1952 // scan directory entries when current inode is a directory 1953 // don't scan the the "." and ".." directories to break loops 1954 if( (inode_type == INODE_TYPE_DIR) && 1955 (strcmp( name , "." ) != 0) && 1956 (strcmp( name , ".." ) != 0) ) 1940 1957 { 1941 1958 // get extended pointer on directory entries xhtab … … 2234 2251 error = vfs_get_name_from_path( current , name , &next , &last ); 2235 2252 2236 // VFS root case2253 // handle VFS root case 2237 2254 if ( error ) 2238 2255 { … … 2258 2275 name, 2259 2276 &child_xp ); 2277 2278 // get child inode local pointer and cluster 2279 child_ptr = GET_PTR( child_xp ); 2280 child_cxy = GET_CXY( child_xp ); 2260 2281 2261 2282 // analyse found & last, depending on lookup_mode … … 2302 2323 else child_type = INODE_TYPE_FILE; 2303 2324 2304 // insert (speculatively) a new child dentry/inode in inode tree2325 // insert a new child dentry/inode couple in inode tree 2305 2326 error = vfs_add_child_in_parent( child_cxy, 2306 2327 child_type, … … 2326 2347 #endif 2327 2348 // scan parent mapper to find the missing dentry, and complete 2328 // the initialisation of dentry and child inode desc iptors2349 // the initialisation of dentry and child inode descriptors 2329 2350 if( parent_cxy == local_cxy ) 2330 2331 2351 { 2332 2352 error = vfs_fs_child_init( parent_ptr, … … 2424 
2444 if( DEBUG_VFS_LOOKUP < cycle ) 2425 2445 printk("\n[%s] thread[%x,%x] found <%s> in Inode Tree / inode (%x,%x)\n", 2426 __FUNCTION__, process->pid, this->trdid, name, GET_CXY(child_xp), GET_PTR(child_xp) ); 2427 #endif 2428 // get child inode local pointer and cluster 2429 child_ptr = GET_PTR( child_xp ); 2430 child_cxy = GET_CXY( child_xp ); 2431 2446 __FUNCTION__, process->pid, this->trdid, name, child_cxy, child_ptr ); 2447 #endif 2432 2448 // check the excl flag 2433 2449 if( last && create && excl ) … … 2584 2600 2585 2601 } // end vfs_new_child_init() 2602 2603 /////////////////////////////////////////////////// 2604 error_t vfs_add_special_dentries( xptr_t child_xp, 2605 xptr_t parent_xp ) 2606 { 2607 error_t error; 2608 vfs_inode_t * child_ptr; // local pointer on child inode directory 2609 cxy_t child_cxy; // child inode directory cluster identifier 2610 vfs_inode_t * parent_ptr; // local pointer on parent inode directory 2611 cxy_t parent_cxy; // parent inode directory cluster identifier 2612 vfs_ctx_t * ctx_ptr; // local pointer on child inode FS context 2613 vfs_fs_type_t fs_type; // FS type of child inode 2614 xptr_t dentry_xp; // extended pointer on dentry (used for . and ..) 2615 vfs_dentry_t * dentry_ptr; // local pointer on dentry (used for . and ..) 
2616 2617 xptr_t parents_root_xp; // extended pointer on inode "parents" field 2618 xptr_t parents_entry_xp; // extended pointer on dentry "parents" field 2619 xptr_t children_xhtab_xp; // extended pointer on inode "children" field 2620 xptr_t children_entry_xp; // extended pointer on dentry "children" field 2621 2622 #if DEBUG_VFS_ADD_SPECIAL 2623 uint32_t cycle = (uint32_t)hal_get_cycles(); 2624 thread_t * this = CURRENT_THREAD; 2625 char child_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2626 char parent_name[CONFIG_VFS_MAX_NAME_LENGTH]; 2627 vfs_inode_get_name( child_xp , child_name ); 2628 vfs_inode_get_name( parent_xp , parent_name ); 2629 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2630 printk("\n[%s] thread[%x,%x] enter / child <%s> / parent <%s> / cycle %d\n", 2631 __FUNCTION__, this->process->pid, this->trdid, child_name, parent_name, cycle ); 2632 #endif 2633 2634 // get new directory cluster and local pointer 2635 child_cxy = GET_CXY( child_xp ); 2636 child_ptr = GET_PTR( child_xp ); 2637 2638 // get parent directory cluster and local pointer 2639 parent_cxy = GET_CXY( parent_xp ); 2640 parent_ptr = GET_PTR( parent_xp ); 2641 2642 // get child inode FS type 2643 ctx_ptr = hal_remote_lpt( XPTR( child_cxy , &child_ptr->ctx ) ); 2644 fs_type = hal_remote_l32( XPTR( child_cxy , &ctx_ptr->type ) ); 2645 2646 //////////////////////////// create <.> 2647 if( child_cxy == local_cxy ) 2648 { 2649 error = vfs_dentry_create( fs_type, 2650 ".", 2651 &dentry_xp ); 2652 } 2653 else 2654 { 2655 rpc_vfs_dentry_create_client( child_cxy, 2656 fs_type, 2657 ".", 2658 &dentry_xp, 2659 &error ); 2660 } 2661 if( error ) 2662 { 2663 printk("\n[ERROR] in %s : cannot create dentry <.> in cluster %x\n", 2664 __FUNCTION__ , child_cxy ); 2665 return -1; 2666 } 2667 2668 // get <.> dentry local pointer 2669 dentry_ptr = GET_PTR( dentry_xp ); 2670 2671 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2672 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2673 printk("\n[%s] thread[%x,%x] created dentry <.> (%x,%x)\n", 2674 
__FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr ); 2675 #endif 2676 2677 // register <.> dentry in child inode xhtab of children 2678 children_xhtab_xp = XPTR( child_cxy , &child_ptr->children ); 2679 children_entry_xp = XPTR( child_cxy , &dentry_ptr->children ); 2680 error = xhtab_insert( children_xhtab_xp , "." , children_entry_xp ); 2681 if( error ) 2682 { 2683 printk("\n[ERROR] in %s : cannot register dentry <.> in xhtab\n", 2684 __FUNCTION__ ); 2685 return -1; 2686 } 2687 2688 // register <.> dentry in child_inode xlist of parents TODO faut-il ? 2689 parents_root_xp = XPTR( child_cxy , &child_ptr->parents ); 2690 parents_entry_xp = XPTR( child_cxy , &dentry_ptr->parents ); 2691 xlist_add_first( parents_root_xp , parents_entry_xp ); 2692 hal_remote_atomic_add( XPTR( child_cxy , &child_ptr->links ) , 1 ); 2693 2694 // update "parent" and "child_xp" fields in <.> dentry 2695 hal_remote_s64( XPTR( child_cxy , &dentry_ptr->child_xp ) , child_xp ); 2696 hal_remote_spt( XPTR( child_cxy , &dentry_ptr->parent ) , child_ptr ); 2697 2698 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2699 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2700 printk("\n[%s] thread[%x,%x] linked dentry <.> to parent and child inodes\n", 2701 __FUNCTION__, this->process->pid, this->trdid ); 2702 #endif 2703 2704 // introduce <.> dentry into child directory mapper 2705 if( child_cxy == local_cxy ) 2706 { 2707 error = vfs_fs_add_dentry( child_ptr, 2708 dentry_ptr ); 2709 } 2710 else 2711 { 2712 rpc_vfs_fs_add_dentry_client( child_cxy, 2713 child_ptr, 2714 dentry_ptr, 2715 &error ); 2716 } 2717 if( error ) 2718 { 2719 printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n", 2720 __FUNCTION__ ); 2721 return -1; 2722 } 2723 2724 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2725 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2726 printk("\n[%s] thread[%x,%x] registered dentry <.> in child mapper\n", 2727 __FUNCTION__, this->process->pid, this->trdid ); 2728 #endif 2729 2730 ///////////////////////////// create 
<..> dentry 2731 if( child_cxy == local_cxy ) 2732 { 2733 error = vfs_dentry_create( fs_type, 2734 "..", 2735 &dentry_xp ); 2736 } 2737 else 2738 { 2739 rpc_vfs_dentry_create_client( child_cxy, 2740 fs_type, 2741 "..", 2742 &dentry_xp, 2743 &error ); 2744 } 2745 if( error ) 2746 { 2747 printk("\n[ERROR] in %s : cannot create dentry <..> in cluster %x\n", 2748 __FUNCTION__ , child_cxy ); 2749 return -1; 2750 } 2751 2752 // get <..> dentry local pointer 2753 dentry_ptr = GET_PTR( dentry_xp ); 2754 2755 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2756 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2757 printk("\n[%s] thread[%x,%x] created dentry <..> (%x,%x)\n", 2758 __FUNCTION__, this->process->pid, this->trdid, child_cxy, dentry_ptr ); 2759 #endif 2760 2761 // register <..> dentry in child_inode xhtab of children 2762 children_xhtab_xp = XPTR( child_cxy , &child_ptr->children ); 2763 children_entry_xp = XPTR( child_cxy , &dentry_ptr->children ); 2764 error = xhtab_insert( children_xhtab_xp , ".." , children_entry_xp ); 2765 if( error ) 2766 { 2767 printk("\n[ERROR] in %s : cannot register dentry <..> in xhtab\n", 2768 __FUNCTION__ ); 2769 return -1; 2770 } 2771 2772 // register <..> dentry in parent_inode xlist of parents TODO faut-il ? 
2773 parents_root_xp = XPTR( parent_cxy , &parent_ptr->parents ); 2774 parents_entry_xp = XPTR( child_cxy , &dentry_ptr->parents ); 2775 xlist_add_first( parents_root_xp , parents_entry_xp ); 2776 hal_remote_atomic_add( XPTR( parent_cxy , &parent_ptr->links ) , 1 ); 2777 2778 // update "parent" and "child_xp" fields in <..> dentry 2779 hal_remote_s64( XPTR( child_cxy , &dentry_ptr->child_xp ) , parent_xp ); 2780 hal_remote_spt( XPTR( child_cxy , &dentry_ptr->parent ) , child_ptr ); 2781 2782 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2783 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2784 printk("\n[%s] thread[%x,%x] linked dentry <..> to parent and child inodes\n", 2785 __FUNCTION__, this->process->pid, this->trdid ); 2786 #endif 2787 2788 // introduce <..> dentry into child directory mapper 2789 if( child_cxy == local_cxy ) 2790 { 2791 error = vfs_fs_add_dentry( child_ptr, 2792 dentry_ptr ); 2793 } 2794 else 2795 { 2796 rpc_vfs_fs_add_dentry_client( child_cxy, 2797 child_ptr, 2798 dentry_ptr, 2799 &error ); 2800 } 2801 if( error ) 2802 { 2803 printk("\n[ERROR] in %s : cannot introduce dentry <..> in mapper %x\n", 2804 __FUNCTION__ ); 2805 return -1; 2806 } 2807 2808 #if(DEBUG_VFS_ADD_SPECIAL & 1) 2809 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2810 printk("\n[%s] thread[%x,%x] registered dentry <..> in child mapper\n", 2811 __FUNCTION__, this->process->pid, this->trdid ); 2812 #endif 2813 2814 #if DEBUG_VFS_ADD_SPECIAL 2815 cycle = (uint32_t)hal_get_cycles(); 2816 if( DEBUG_VFS_ADD_SPECIAL < cycle ) 2817 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 2818 __FUNCTION__, this->process->pid, this->trdid, (uint32_t)hal_get_cycles() ); 2819 #endif 2820 2821 return 0; 2822 2823 } // end vfs_add_special_dentries() 2586 2824 2587 2825 ////////////////////////////////////////// … … 2845 3083 2846 3084 #if(DEBUG_VFS_ADD_CHILD & 1) 2847 if( local_cxy == 1 ) 2848 // if( DEBUG_VFS_ADD_CHILD < cycle ) 3085 if( DEBUG_VFS_ADD_CHILD < cycle ) 2849 3086 printk("\n[%s] thread[%x,%x] / dentry (%x,%x) 
registered in child inode (%x,%x)\n", 2850 3087 __FUNCTION__, this->process->pid, this->trdid, … … 2852 3089 #endif 2853 3090 2854 // 4.register new_dentry in parent_inode xhtab of children3091 // register new_dentry in parent_inode xhtab of children 2855 3092 children_xhtab_xp = XPTR( parent_cxy , &parent_inode_ptr->children ); 2856 3093 children_entry_xp = XPTR( parent_cxy , &new_dentry_ptr->children ); … … 2864 3101 #endif 2865 3102 2866 // 5.update "parent" and "child_xp" fields in new_dentry3103 // update "parent" and "child_xp" fields in new_dentry 2867 3104 hal_remote_s64( XPTR( parent_cxy , &new_dentry_ptr->child_xp ) , new_inode_xp ); 2868 3105 hal_remote_spt( XPTR( parent_cxy , &new_dentry_ptr->parent ) , parent_inode_ptr ); -
trunk/kernel/fs/vfs.h
r610 r611 45 45 46 46 struct vfs_inode_s; 47 struct vfs_dentry_t; 48 struct vfs_ctx_t; 49 struct vfs_file_ref_s; 47 struct vfs_dentry_s; 48 struct vfs_ctx_s; 50 49 struct vfs_file_s; 51 52 struct vfs_inode_op_s;53 struct vfs_dentry_op_s;54 struct vfs_file_op_s;55 struct vfs_ctx_op_s;56 57 struct vfs_lookup_cmd_s;58 struct vfs_lookup_rsp_s;59 50 60 51 struct mapper_s; … … 63 54 struct vseg_s; 64 55 struct page_s; 65 66 56 67 57 /****************************************************************************************** … … 133 123 *****************************************************************************************/ 134 124 135 /* this enum define the VFS inode types values */ 136 /* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file */ 125 /* this enum define the VFS inode types values */ 126 /* WARNING : this enum must be kept consistent with macros in <shared_stat.h> file */ 127 /* and with types in <shared_dirent.h> file. */ 137 128 138 129 typedef enum … … 144 135 INODE_TYPE_SOCK = 4, /*! POSIX socket */ 145 136 INODE_TYPE_DEV = 5, /*! character device */ 146 INODE_TYPE_SYML = 6, /*! symbolic link */ 137 INODE_TYPE_BLK = 6, /*! block device */ 138 INODE_TYPE_SYML = 7, /*! symbolic link */ 147 139 } 148 140 vfs_inode_type_t; … … 184 176 #define VFS_ISUID 0x0004000 185 177 #define VFS_ISGID 0x0002000 186 define VFS_ISVTX 0x0001000178 #define VFS_ISVTX 0x0001000 187 179 188 180 #define VFS_IRWXU 0x0000700 … … 316 308 * This function allocates memory from local cluster for an inode descriptor and the 317 309 * associated mapper. It initialise these descriptors from arguments values. 318 * If the client thread is not running in the cluster containing this inode, 319 * it must use the rpc_vfs_inode_create_client() function. 310 * It must called by a local thread. Use the RPC_INODE_CREATE if client thread is remote. 
320 311 ****************************************************************************************** 321 312 * @ fs_type : file system type. … … 407 398 408 399 400 409 401 /****************************************************************************************** 410 402 * These low-level functions access / modify a VFS dentry descriptor … … 414 406 * This function allocates memory from local cluster for a dentry descriptor, 415 407 * initialises it from arguments values, and returns the extended pointer on dentry. 416 * If the client thread is not running in the target cluster for this inode, 417 * it must use the rpc_dentry_create_client() function. 408 * It must called by a local thread. Use the RPC_DENTRY_CREATE if client thread is remote. 418 409 ****************************************************************************************** 419 410 * @ fs_type : file system type. … … 548 539 * 549 540 * [Implementation] 550 * As there are cross-references between the inode and the associated dentry, this551 * function implementa three steps scenario :541 * As there are cross-references between inode and dentry, this function implements 542 * a three steps scenario : 552 543 * 1) The dentry descriptor is created in the cluster containing the existing <parent_xp> 553 * inode, and is only partially initialized : "fs_type", "name", "parent_xp" fields.544 * inode, and partially initialized, using the RPC_VFS_CREATE DENTRY if required. 554 545 * 2) The inode and its associated mapper are created in cluster identified by <child_cxy>, 555 * and initialised. The new inode and the parent inode can have different FS types. 556 * 3) The "child_xp" field in dentry (pointing on the created inode) is updated, 557 * and the refcount is incremented for both the inode and the dentry. 546 * and partially initialised, using the RPC_VFS_CREATE_INODE if required. 547 * The new inode and the parent inode can have different FS types. 
548 * 3) The pointers between the parent inode, the new dentry, and the child inode 549 * are updated, using remote accesses. 558 550 ****************************************************************************************** 559 551 * @ child_inode_cxy : [in] target cluster for child inode. … … 612 604 613 605 /****************************************************************************************** 606 * This function is called by the vfs_mkdir() function to create the two special dentries 607 * <.> and <..> in a new directory identified by the <child_xp> argument. The parent 608 * directory inode is defined by the <parent_xp> argument. 609 * The two dentries are introduced in the Inode Tree. They are also introduced in the 610 * in the child directory mapper, and the IOC device is updated. 611 ****************************************************************************************** 612 * @ child_xp : extended pointer on new directory inode. 613 * @ parent_xp : extended pointer on parent directory inode. 614 * @ return 0 if success / -1 if failure. 615 *****************************************************************************************/ 616 error_t vfs_add_special_dentries( xptr_t child_xp, 617 xptr_t parent_xp ); 618 619 /****************************************************************************************** 614 620 * This recursive function diplays a complete inode/dentry sub-tree. 615 621 * Any inode can be selected as the sub-tree root. 616 * TODOthis function is not protected against a concurrent inode/dentry removal...622 * WARNING : this function is not protected against a concurrent inode/dentry removal... 617 623 ****************************************************************************************** 618 624 * @ inode_xp : extended pointer on sub-tree root inode. 
… … 809 815 810 816 /****************************************************************************************** 811 * This function returns, in the structure pointed by the <k_dirent> kernel pointer,812 * various infos on the directory entry currently pointed by the <file_xp> file descriptor.813 * TODO not implemented yet...814 ******************************************************************************************815 * @ file_xp : extended pointer on the file descriptor of the searched directory .816 * @ k_dirent : local pointer on the dirent structure in kernel space.817 * @ returns 0 if success / -1 if error.818 *****************************************************************************************/819 error_t vfs_readdir( xptr_t file_xp,820 struct dirent * k_dirent );821 822 /******************************************************************************************823 817 * This function creates a new directory as defined by the <root_xp> & <path> arguments. 824 818 * TODO not implemented yet... … … 880 874 * The directory inode descriptor and the dentry descriptor are in the same cluster. 881 875 * Depending on the file system type, it calls the proper, FS specific function. 882 * It ulsopdates the dentry descriptor and/or the inode descriptor extensions876 * It also updates the dentry descriptor and/or the inode descriptor extensions 883 877 * as required by the specific file system type. 884 878 * Finally, it synchronously updates the parent directory on IOC device. 885 879 * 886 880 * It must be executed by a thread running in the cluster containing the parent directory. 887 * It can be the RPC_VFS_ VS_ADD_DENTRY. This function does NOT take any lock.881 * It can be the RPC_VFS_FS_ADD_DENTRY. This function does NOT take any lock. 888 882 ****************************************************************************************** 889 883 * @ parent : local pointer on parent (directory) inode. -
trunk/kernel/kern/cluster.h
r583 r611 264 264 /****************************************************************************************** 265 265 * This function returns a pointer on the local process descriptor from the PID. 266 * It uses the RPC267 * to create a local process descriptor copy if it does not exist yet.268 266 ****************************************************************************************** 269 267 * @ pid : searched process identifier. -
trunk/kernel/kern/kernel_init.c
r610 r611 167 167 "PROCESS_FDARRAY", // 27 168 168 "FATFS_FREE", // 28 169 170 "PROCESS_THTBL", // 29 171 172 "MAPPER_STATE", // 30 173 "VFS_SIZE", // 31 174 "VFS_FILE", // 32 175 "VMM_VSL", // 33 176 "VMM_GPT", // 34 177 "VFS_MAIN", // 35 169 "PROCESS_DIR", // 29 170 171 "PROCESS_THTBL", // 30 172 173 "MAPPER_STATE", // 31 174 "VFS_SIZE", // 32 175 "VFS_FILE", // 33 176 "VMM_VSL", // 34 177 "VMM_GPT", // 35 178 "VFS_MAIN", // 36 178 179 }; 179 180 -
trunk/kernel/kern/process.c
r610 r611 274 274 remote_queuelock_init( XPTR( local_cxy , &process->children_lock ), LOCK_PROCESS_CHILDREN ); 275 275 276 // reset semaphore / mutex / barrier / condvar list roots 276 // reset semaphore / mutex / barrier / condvar list roots and lock 277 277 xlist_root_init( XPTR( local_cxy , &process->sem_root ) ); 278 278 xlist_root_init( XPTR( local_cxy , &process->mutex_root ) ); … … 280 280 xlist_root_init( XPTR( local_cxy , &process->condvar_root ) ); 281 281 remote_queuelock_init( XPTR( local_cxy , &process->sync_lock ), LOCK_PROCESS_USERSYNC ); 282 283 // reset open directories root and lock 284 xlist_root_init( XPTR( local_cxy , &process->dir_root ) ); 285 remote_queuelock_init( XPTR( local_cxy , &process->dir_lock ), LOCK_PROCESS_DIR ); 282 286 283 287 // register new process in the local cluster manager pref_tbl[] … … 546 550 thread_block( client_xp , THREAD_BLOCKED_RPC ); 547 551 548 // take the lock protecting process copies549 remote_queuelock_acquire( lock_xp );550 551 552 // initialize shared RPC descriptor 552 553 rpc.responses = 0; … … 555 556 rpc.thread = client; 556 557 rpc.lid = client->core->lid; 557 rpc.args[0] = type; 558 rpc.args[1] = pid; 558 rpc.args[0] = pid; 559 rpc.args[1] = type; 560 561 // take the lock protecting process copies 562 remote_queuelock_acquire( lock_xp ); 559 563 560 564 // scan list of process copies 561 // to send RPCs to remote copies562 565 XLIST_FOREACH( root_xp , iter_xp ) 563 566 { -
trunk/kernel/kern/process.h
r610 r611 60 60 ********************************************************************************************/ 61 61 62 typedef enum process_sigactions62 typedef enum 63 63 { 64 64 BLOCK_ALL_THREADS = 0x11, 65 65 UNBLOCK_ALL_THREADS = 0x22, 66 66 DELETE_ALL_THREADS = 0x33, 67 } process_sigactions_t; 67 } 68 process_sigactions_t; 68 69 69 70 /********************************************************************************************* … … 145 146 146 147 struct thread_s * th_tbl[CONFIG_THREADS_MAX_PER_CLUSTER]; /*! local threads */ 148 147 149 uint32_t th_nr; /*! number of threads in this cluster */ 148 150 rwlock_t th_lock; /*! lock protecting th_tbl[] i */ 149 151 150 xlist_entry_t sem_root; /*! root of the user defined semaphore list*/152 xlist_entry_t sem_root; /*! root of the user defined semaphore list */ 151 153 xlist_entry_t mutex_root; /*! root of the user defined mutex list */ 152 154 xlist_entry_t barrier_root; /*! root of the user defined barrier list */ 153 155 xlist_entry_t condvar_root; /*! root of the user defined condvar list */ 154 156 remote_queuelock_t sync_lock; /*! lock protecting user defined synchro lists */ 157 158 xlist_entry_t dir_root; /*! root of the user defined DIR list */ 159 remote_queuelock_t dir_lock; /*! lock protexting user defined DIR list */ 155 160 156 161 uint32_t term_state; /*! termination status (flags & exit status) */ -
trunk/kernel/kern/rpc.c
r610 r611 77 77 &rpc_undefined, // 24 unused slot 78 78 &rpc_mapper_handle_miss_server, // 25 79 &rpc_ undefined, // 26 unused slot79 &rpc_vmm_delete_vseg_server, // 26 80 80 &rpc_vmm_create_vseg_server, // 27 81 81 &rpc_vmm_set_cow_server, // 28 … … 113 113 "undefined", // 24 114 114 "MAPPER_HANDLE_MISS", // 25 115 " undefined",// 26115 "VMM_DELETE_VSEG", // 26 116 116 "VMM_CREATE_VSEG", // 27 117 117 "VMM_SET_COW", // 28 … … 283 283 bool_t blocking; // blocking RPC when true 284 284 remote_fifo_t * rpc_fifo; // local pointer on RPC fifo 285 uint32_t count; // current number of expected responses 285 286 286 287 // makes RPC thread not preemptable … … 302 303 uint32_t cycle = (uint32_t)hal_get_cycles(); 303 304 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 304 printk("\n[%s] RPC thread %xon core[%d] takes RPC_FIFO ownership / cycle %d\n",305 __FUNCTION__, server_ptr-> trdid, server_core_lid, cycle );305 printk("\n[%s] RPC thread[%x,%x] on core[%d] takes RPC_FIFO ownership / cycle %d\n", 306 __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, server_core_lid, cycle ); 306 307 #endif 307 308 // try to consume one RPC request … … 326 327 uint32_t items = remote_fifo_items( XPTR( local_cxy , rpc_fifo ) ); 327 328 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 328 printk("\n[%s] RPC thread %xgot rpc %s / client_cxy %x / items %d / cycle %d\n",329 __FUNCTION__, server_ptr-> trdid, rpc_str[index], desc_cxy, items, cycle );329 printk("\n[%s] RPC thread[%x,%x] got rpc %s / client_cxy %x / items %d / cycle %d\n", 330 __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, rpc_str[index], desc_cxy, items, cycle ); 330 331 #endif 331 332 // register client thread in RPC thread descriptor … … 338 339 cycle = (uint32_t)hal_get_cycles(); 339 340 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 340 printk("\n[%s] RPC thread %x completes rpc %s / client_cxy %x / cycle %d\n", 341 __FUNCTION__, server_ptr->trdid, rpc_str[index], desc_cxy, cycle ); 342 #endif 343 // decrement response counter in 
RPC descriptor if blocking RPC 344 if( blocking ) 341 printk("\n[%s] RPC thread[%x,%x] completes rpc %s / client_cxy %x / cycle %d\n", 342 __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, rpc_str[index], desc_cxy, cycle ); 343 #endif 344 // decrement expected responses counter in RPC descriptor 345 count = hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 ); 346 347 // decrement response counter in RPC descriptor if last response 348 if( count == 1 ) 345 349 { 346 // decrement responses counter in RPC descriptor347 hal_remote_atomic_add( XPTR( desc_cxy, &desc_ptr->responses ), -1 );348 349 350 // get client thread pointer and client core lid from RPC descriptor 350 351 client_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) ); … … 359 360 cycle = (uint32_t)hal_get_cycles(); 360 361 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 361 printk("\n[%s] RPC thread %x unblocked client thread %x / cycle %d\n", 362 __FUNCTION__, server_ptr->trdid, client_ptr->trdid, cycle ); 362 printk("\n[%s] RPC thread[%x,%x] unblocked client thread[%x,%x] / cycle %d\n", 363 __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, 364 client_ptr->process->pid, client_ptr->trdid, cycle ); 363 365 #endif 364 366 // send IPI to client core 365 367 dev_pic_send_ipi( desc_cxy , client_core_lid ); 366 367 } // end if blocking RPC 368 } 368 369 } // end RPC handling if fifo non empty 369 370 } // end if RPC_fIFO ownership successfully taken and released … … 376 377 uint32_t cycle = (uint32_t)hal_get_cycles(); 377 378 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 378 printk("\n[%s] RPC thread %xsuicides / cycle %d\n",379 __FUNCTION__, server_ptr-> trdid, cycle );379 printk("\n[%s] RPC thread[%x,%x] suicides / cycle %d\n", 380 __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, cycle ); 380 381 #endif 381 382 // update RPC threads counter … … 395 396 uint32_t cycle = (uint32_t)hal_get_cycles(); 396 397 if( DEBUG_RPC_SERVER_GENERIC < cycle ) 397 printk("\n[%s] RPC 
thread %xblock IDLE & deschedules / cycle %d\n",398 __FUNCTION__, server_ptr-> trdid, cycle );398 printk("\n[%s] RPC thread[%x,%x] block IDLE & deschedules / cycle %d\n", 399 __FUNCTION__, server_ptr->process->pid, server_ptr->trdid, cycle ); 399 400 #endif 400 401 // RPC thread blocks on IDLE … … 425 426 #endif 426 427 427 assert( (cxy != local_cxy) , " targetcluster is not remote\n");428 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 428 429 429 430 // initialise RPC descriptor header … … 498 499 #endif 499 500 500 assert( (cxy != local_cxy) , " targetcluster is not remote\n");501 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 501 502 502 503 // initialise RPC descriptor header … … 576 577 #endif 577 578 578 assert( (cxy != local_cxy) , " targetcluster is not remote\n");579 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 579 580 580 581 // initialise RPC descriptor header … … 677 678 #endif 678 679 679 assert( (cxy != local_cxy) , " targetcluster is not remote\n");680 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 680 681 681 682 // initialise RPC descriptor header … … 784 785 #endif 785 786 786 assert( (cxy != local_cxy) , " targetcluster is not remote\n");787 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 787 788 788 789 // initialise RPC descriptor header … … 862 863 863 864 ///////////////////////////////////////////////////////////////////////////////////////// 864 // [9] Marshaling functions attached to RPC_PROCESS_SIGACTION (multicast /non blocking)865 // [9] Marshaling functions attached to RPC_PROCESS_SIGACTION (non blocking) 865 866 ///////////////////////////////////////////////////////////////////////////////////////// 866 867 … … 869 870 rpc_desc_t * rpc ) 870 871 { 871 872 872 #if DEBUG_RPC_PROCESS_SIGACTION 873 uint32_t cycle = (uint32_t)hal_get_cycles(); 874 uint32_t action = rpc->args[0]; 875 pid_t pid = rpc->args[1]; 873 uint32_t cycle = 
(uint32_t)hal_get_cycles(); 876 874 thread_t * this = CURRENT_THREAD; 877 875 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 878 printk("\n[%s] thread[%x,%x] enter to request %s of process %x in cluster %x/ cycle %d\n",879 __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle );880 #endif 881 882 // check some RPCarguments883 884 876 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 877 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 878 #endif 879 880 // check RPC "index" and "blocking" arguments 881 assert( (rpc->blocking == false) , "must be non-blocking\n"); 882 assert( (rpc->index == RPC_PROCESS_SIGACTION ) , "bad RPC index\n" ); 885 883 886 884 // register RPC request in remote RPC fifo and return … … 890 888 cycle = (uint32_t)hal_get_cycles(); 891 889 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 892 printk("\n[%s] thread[%x,%x] requested %s of process %x in cluster %x / cycle %d\n", 893 __FUNCTION__, this->process->pid, this->trdid, process_action_str(action), pid, cxy, cycle ); 894 #endif 895 890 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 891 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 892 #endif 896 893 } // end rpc_process_sigaction_client() 897 894 … … 899 896 void rpc_process_sigaction_server( xptr_t xp ) 900 897 { 901 pid_t pid; // target process identifier 902 process_t * process; // pointer on local target process descriptor 903 uint32_t action; // sigaction index 904 thread_t * client_ptr; // pointer on client thread in client cluster 905 xptr_t client_xp; // extended pointer client thread 906 cxy_t client_cxy; // client cluster identifier 907 rpc_desc_t * rpc; // pointer on rpc descriptor in client cluster 908 xptr_t count_xp; // extended pointer on responses counter 909 uint32_t count_value; // responses counter value 910 lid_t client_lid; // client core local index 898 #if DEBUG_RPC_PROCESS_SIGACTION 899 uint32_t cycle = 
(uint32_t)hal_get_cycles(); 900 thread_t * this = CURRENT_THREAD; 901 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 902 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 903 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 904 #endif 911 905 912 906 // get client cluster identifier and pointer on RPC descriptor 913 c lient_cxy = GET_CXY( xp );914 rpc 907 cxy_t client_cxy = GET_CXY( xp ); 908 rpc_desc_t * desc = GET_PTR( xp ); 915 909 916 910 // get arguments from RPC descriptor 917 action = (uint32_t)hal_remote_l64( XPTR(client_cxy , &rpc->args[0]) ); 918 pid = (pid_t) hal_remote_l64( XPTR(client_cxy , &rpc->args[1]) ); 919 920 #if DEBUG_RPC_PROCESS_SIGACTION 921 uint32_t cycle = (uint32_t)hal_get_cycles(); 922 thread_t * this = CURRENT_THREAD; 923 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 924 printk("\n[%s] thread[%x,%x] enter to %s process %x in cluster %x / cycle %d\n", 925 __FUNCTION__, this->process->pid, this->trdid, 926 process_action_str( action ), pid, local_cxy, cycle ); 927 #endif 911 pid_t pid = (pid_t) hal_remote_l64( XPTR(client_cxy , &desc->args[0]) ); 912 uint32_t action = (uint32_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) ); 928 913 929 914 // get client thread pointers 930 client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );931 client_xp = XPTR( client_cxy , client_ptr );915 thread_t * client_ptr = hal_remote_lpt( XPTR( client_cxy , &desc->thread ) ); 916 xptr_t client_xp = XPTR( client_cxy , client_ptr ); 932 917 933 918 // get local process descriptor 934 process = cluster_get_local_process_from_pid( pid );919 process_t * process = cluster_get_local_process_from_pid( pid ); 935 920 936 921 // call relevant kernel function … … 939 924 else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process ); 940 925 941 // build extended pointer on response counter in RPC942 count_xp = XPTR( client_cxy , &rpc->responses );943 944 // decrement the responses counter in RPC 
descriptor,945 count_value = hal_remote_atomic_add( count_xp , -1 );946 947 // unblock the client thread only if it is the last response.948 if( count_value == 1 )949 {950 // get client core lid951 client_lid = (lid_t) hal_remote_l32 ( XPTR( client_cxy , &rpc->lid ) );952 953 // unblock client thread954 thread_unblock( client_xp , THREAD_BLOCKED_RPC );955 956 // send an IPI to client core957 // dev_pic_send_ipi( client_cxy , client_lid );958 }959 960 926 #if DEBUG_RPC_PROCESS_SIGACTION 961 927 cycle = (uint32_t)hal_get_cycles(); 962 928 if( DEBUG_RPC_PROCESS_SIGACTION < cycle ) 963 printk("\n[%s] thread[%x,%x] exit after %s process %x in cluster %x / cycle %d\n", 964 __FUNCTION__, this->process->pid, this->trdid, 965 process_action_str( action ), pid, local_cxy, cycle ); 966 #endif 967 929 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 930 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 931 #endif 968 932 } // end rpc_process_sigaction_server() 969 933 … … 991 955 #endif 992 956 993 assert( (cxy != local_cxy) , " targetcluster is not remote\n");957 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 994 958 995 959 // initialise RPC descriptor header … … 1091 1055 #endif 1092 1056 1093 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1057 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1094 1058 1095 1059 // initialise RPC descriptor header … … 1163 1127 #endif 1164 1128 1165 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1129 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1166 1130 1167 1131 // initialise RPC descriptor header … … 1251 1215 #endif 1252 1216 1253 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1217 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1254 1218 1255 1219 // initialise RPC descriptor header … … 1324 1288 #endif 1325 1289 1326 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1290 
assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1327 1291 1328 1292 // initialise RPC descriptor header … … 1408 1372 #endif 1409 1373 1410 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1374 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1411 1375 1412 1376 // initialise RPC descriptor header … … 1480 1444 #endif 1481 1445 1482 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1446 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1483 1447 1484 1448 // initialise RPC descriptor header … … 1569 1533 #endif 1570 1534 1571 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1535 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1572 1536 1573 1537 // initialise RPC descriptor header … … 1649 1613 #endif 1650 1614 1651 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1615 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1652 1616 1653 1617 // initialise RPC descriptor header … … 1729 1693 #endif 1730 1694 1731 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1695 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1732 1696 1733 1697 // initialise RPC descriptor header … … 1808 1772 #endif 1809 1773 1810 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1774 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1811 1775 1812 1776 // initialise RPC descriptor header … … 1896 1860 #endif 1897 1861 1898 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1862 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1899 1863 1900 1864 // initialise RPC descriptor header … … 1975 1939 #endif 1976 1940 1977 assert( (cxy != local_cxy) , " targetcluster is not remote\n");1941 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 1978 1942 1979 1943 // initialise RPC descriptor header … … 2053 2017 #endif 2054 2018 2055 assert( (cxy != local_cxy) , " targetcluster is not 
remote\n");2019 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 2056 2020 2057 2021 // initialise RPC descriptor header … … 2125 2089 { 2126 2090 #if DEBUG_RPC_MAPPER_HANDLE_MISS 2091 thread_t * this = CURRENT_THREAD; 2127 2092 uint32_t cycle = (uint32_t)hal_get_cycles(); 2128 2093 if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS ) … … 2131 2096 #endif 2132 2097 2133 assert( (cxy != local_cxy) , " targetcluster is not remote\n");2098 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 2134 2099 2135 2100 // initialise RPC descriptor header … … 2162 2127 { 2163 2128 #if DEBUG_RPC_MAPPER_HANDLE_MISS 2129 thread_t * this = CURRENT_THREAD; 2164 2130 uint32_t cycle = (uint32_t)hal_get_cycles(); 2165 2131 if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS ) … … 2199 2165 2200 2166 ///////////////////////////////////////////////////////////////////////////////////////// 2201 // [26] undefined slot 2202 ///////////////////////////////////////////////////////////////////////////////////////// 2167 // [26] Marshaling functions attached to RPC_VMM_DELETE_VSEG (parallel / non blocking) 2168 ///////////////////////////////////////////////////////////////////////////////////////// 2169 2170 ////////////////////////////////////////////////// 2171 void rpc_vmm_delete_vseg_client( cxy_t cxy, 2172 rpc_desc_t * rpc ) 2173 { 2174 #if DEBUG_RPC_VMM_DELETE_VSEG 2175 thread_t * this = CURRENT_THREAD; 2176 uint32_t cycle = (uint32_t)hal_get_cycles(); 2177 if( cycle > DEBUG_RPC_VMM_DELETE_VSEG ) 2178 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2179 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2180 #endif 2181 2182 // check RPC "index" and "blocking" arguments 2183 assert( (rpc->blocking == false) , "must be non-blocking\n"); 2184 assert( (rpc->index == RPC_VMM_DELETE_VSEG ) , "bad RPC index\n" ); 2185 2186 // register RPC request in remote RPC fifo 2187 rpc_send( cxy , rpc ); 2188 2189 #if DEBUG_RPC_VMM_DELETE_VSEG 2190 cycle = 
(uint32_t)hal_get_cycles(); 2191 if( cycle > DEBUG_RPC_VMM_DELETE_VSEG ) 2192 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2193 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2194 #endif 2195 } 2196 2197 //////////////////////////////////////////// 2198 void rpc_vmm_delete_vseg_server( xptr_t xp ) 2199 { 2200 #if DEBUG_RPC_VMM_DELETE_VSEG 2201 uint32_t cycle = (uint32_t)hal_get_cycles(); 2202 thread_t * this = CURRENT_THREAD; 2203 if( DEBUG_RPC_VMM_DELETE_VSEG < cycle ) 2204 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2205 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2206 #endif 2207 2208 // get client cluster identifier and pointer on RPC descriptor 2209 cxy_t client_cxy = GET_CXY( xp ); 2210 rpc_desc_t * desc = GET_PTR( xp ); 2211 2212 // get arguments from RPC descriptor 2213 pid_t pid = (pid_t) hal_remote_l64( XPTR(client_cxy , &desc->args[0]) ); 2214 intptr_t vaddr = (intptr_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) ); 2215 2216 // call relevant kernel function 2217 vmm_delete_vseg( pid , vaddr ); 2218 2219 #if DEBUG_RPC_VMM_DELETE_VSEG 2220 cycle = (uint32_t)hal_get_cycles(); 2221 if( DEBUG_RPC_VMM_DELETE_VSEG < cycle ) 2222 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2223 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle ); 2224 #endif 2225 } 2203 2226 2204 2227 ///////////////////////////////////////////////////////////////////////////////////////// … … 2218 2241 struct vseg_s ** vseg ) 2219 2242 { 2220 assert( (cxy != local_cxy) , "target cluster is not remote\n"); 2243 #if DEBUG_RPC_VMM_CREATE_VSEG 2244 thread_t * this = CURRENT_THREAD; 2245 uint32_t cycle = (uint32_t)hal_get_cycles(); 2246 if( cycle > DEBUG_RPC_VMM_CREATE_VSEG ) 2247 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2248 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2249 #endif 2250 2251 assert( (cxy != local_cxy) , 
"server cluster is not remote\n"); 2221 2252 2222 2253 // initialise RPC descriptor header … … 2242 2273 *vseg = (vseg_t *)(intptr_t)rpc.args[8]; 2243 2274 2275 #if DEBUG_RPC_VMM_CREATE_VSEG 2276 cycle = (uint32_t)hal_get_cycles(); 2277 if( cycle > DEBUG_RPC_VMM_CREATE_VSEG ) 2278 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2279 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2280 #endif 2244 2281 } 2245 2282 … … 2247 2284 void rpc_vmm_create_vseg_server( xptr_t xp ) 2248 2285 { 2286 #if DEBUG_RPC_VMM_CREATE_VSEG 2287 thread_t * this = CURRENT_THREAD; 2288 uint32_t cycle = (uint32_t)hal_get_cycles(); 2289 if( cycle > DEBUG_RPC_VMM_CREATE_VSEG ) 2290 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n", 2291 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2292 #endif 2293 2249 2294 // get client cluster identifier and pointer on RPC descriptor 2250 2295 cxy_t cxy = GET_CXY( xp ); … … 2274 2319 hal_remote_s64( XPTR( cxy , &desc->args[8] ) , (uint64_t)(intptr_t)vseg ); 2275 2320 2321 #if DEBUG_RPC_VMM_CREATE_VSEG 2322 cycle = (uint32_t)hal_get_cycles(); 2323 if( cycle > DEBUG_RPC_VMM_CREATE_VSEG ) 2324 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n", 2325 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle ); 2326 #endif 2276 2327 } 2277 2328 … … 2284 2335 process_t * process ) 2285 2336 { 2286 assert( (cxy != local_cxy) , " targetcluster is not remote\n");2337 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 2287 2338 2288 2339 // initialise RPC descriptor header … … 2326 2377 bool_t detailed ) 2327 2378 { 2328 assert( (cxy != local_cxy) , " targetcluster is not remote\n");2379 assert( (cxy != local_cxy) , "server cluster is not remote\n"); 2329 2380 2330 2381 // initialise RPC descriptor header -
trunk/kernel/kern/rpc.h
r610 r611 54 54 /*********************************************************************************** 55 55 * This enum defines all RPC indexes. 56 * It must be consistent with the rpc_server[] array defined in in the rpc.c file.56 * It must be consistent with the rpc_server[] arrays defined in in the rpc.c file. 57 57 **********************************************************************************/ 58 58 … … 68 68 RPC_THREAD_KERNEL_CREATE = 7, 69 69 RPC_UNDEFINED_8 = 8, 70 RPC_PROCESS_SIGACTION = 9, 70 RPC_PROCESS_SIGACTION = 9, // non blocking 71 71 72 72 RPC_VFS_INODE_CREATE = 10, … … 87 87 RPC_UNDEFINED_24 = 24, 88 88 RPC_MAPPER_HANDLE_MISS = 25, 89 RPC_ UNDEFINED_26 = 26,89 RPC_VMM_DELETE_VSEG = 26, // non blocking 90 90 RPC_VMM_CREATE_VSEG = 27, 91 91 RPC_VMM_SET_COW = 28, … … 281 281 282 282 /*********************************************************************************** 283 * [9] The RPC_PROCESS_SIGACTION allows a thread running in any cluster 284 * to request a cluster identified by the <cxy> argument (local or remote) 285 * to execute a given sigaction for a given cluster. The <action_type> and 286 * the <pid> arguments are defined in the shared RPC descriptor, that must be 287 * initialised by the client thread. 283 * [9] The non blocking RPC_PROCESS_SIGACTION allows any client thread running in 284 * any cluster to send parallel RPC requests to one or several servers (that can be 285 * local or remote), to execute a given sigaction, defined by the <action_type> 286 * argument[1], for a given process identified by the <pid> argument[0]. 288 287 * 289 * WARNING : It is implemented as a NON BLOCKING multicast RPC, that can be sent 290 * in parallel to all process copies. The various RPC server threads atomically 291 * decrement the <response> field in the shared RPC descriptor. 292 * The last server thread unblock the client thread that blocked (after sending 293 * all RPC requests) in the process_sigaction() function. 
288 * WARNING : It is implemented as a NON BLOCKING RPC, that can be sent in parallel 289 * to several servers. The RPC descriptor, containing the <action_type> and <pid> 290 * arguments, as well as the RPC <index>, <blocked>, and <response> fields, must 291 * be allocated and initialised by the calling function itself. 292 * Each RPC server thread atomically decrements the <response> field in this 293 * shared RPC descriptor. The last server thread unblock the client thread, 294 * that blocked only after sending all parallel RPC requests to all servers. 294 295 *********************************************************************************** 295 296 * @ cxy : server cluster identifier. 296 * @ rpc : pointer on ishared RPC descriptor initialized by the client thread.297 * @ rpc : pointer on shared RPC descriptor initialized by the client thread. 297 298 **********************************************************************************/ 298 299 void rpc_process_sigaction_client( cxy_t cxy, … … 550 551 * On the server side, this RPC call the mapper_handle_miss() function and return 551 552 * an extended pointer on the allocated page descriptor and an error status. 553 *********************************************************************************** 552 554 * @ cxy : server cluster identifier. 553 555 * @ mapper : [in] local pointer on mapper. … … 566 568 567 569 /*********************************************************************************** 568 * [26] undefined slot 569 **********************************************************************************/ 570 * [26] The non blocking RPC_VMM_DELETE_VSEG allows any client thread running in 571 * any cluster to send parallel RPC requests to one or several clusters (that can be 572 * local or remote), to delete from a given VMM, identified by the <pid> argument[0] 573 * a given vseg, identified by the <vaddr> argument[1]. 
574 * 575 * WARNING : It is implemented as a NON BLOCKING RPC, that can be sent in parallel 576 * to several servers. The RPC descriptor, containing the <pid> and <vaddr> 577 * arguments, as well as the RPC <index>, <blocked>, and <response> fields, must 578 * be allocated and initialised by the calling function itself. 579 * Each RPC server thread atomically decrements the the <response> field in this 580 * shared RPC descriptor. The last server thread unblock the client thread, 581 * that blocked only after sending all paralle RPC requests to all servers. 582 *********************************************************************************** 583 * @ cxy : server cluster identifier. 584 * @ rpc : pointer on shared RPC descriptor initialized by the client thread. 585 **********************************************************************************/ 586 void rpc_vmm_delete_vseg_client( cxy_t cxy, 587 struct rpc_desc_s * rpc ); 588 589 void rpc_vmm_delete_vseg_server( xptr_t xp ); 570 590 571 591 /*********************************************************************************** -
trunk/kernel/kern/thread.c
r593 r611 326 326 { 327 327 printk("\n[ERROR] in %s : cannot create new thread\n", __FUNCTION__ ); 328 vmm_ remove_vseg( vseg);328 vmm_delete_vseg( process->pid , vseg->min ); 329 329 return ENOMEM; 330 330 } … … 348 348 { 349 349 printk("\n[ERROR] in %s : cannot initialize new thread\n", __FUNCTION__ ); 350 vmm_ remove_vseg( vseg);350 vmm_delete_vseg( process->pid , vseg->min ); 351 351 thread_release( thread ); 352 352 return EINVAL; … … 369 369 { 370 370 printk("\n[ERROR] in %s : cannot create CPU context\n", __FUNCTION__ ); 371 vmm_ remove_vseg( vseg);371 vmm_delete_vseg( process->pid , vseg->min ); 372 372 thread_release( thread ); 373 373 return ENOMEM; … … 379 379 { 380 380 printk("\n[ERROR] in %s : cannot create FPU context\n", __FUNCTION__ ); 381 vmm_ remove_vseg( vseg);381 vmm_delete_vseg( process->pid , vseg->min ); 382 382 thread_release( thread ); 383 383 return ENOMEM; … … 538 538 539 539 // register STACK vseg in local child VSL 540 vmm_ vseg_attach( &child_process->vmm , vseg );540 vmm_attach_vseg_to_vsl( &child_process->vmm , vseg ); 541 541 542 542 #if (DEBUG_THREAD_USER_FORK & 1) … … 560 560 if( error ) 561 561 { 562 vmm_vseg_detach( &child_process->vmm , vseg ); 563 vseg_free( vseg ); 562 vmm_detach_vseg_from_vsl( &child_process->vmm , vseg ); 564 563 thread_release( child_ptr ); 565 564 printk("\n[ERROR] in %s : cannot update child GPT\n", __FUNCTION__ ); -
trunk/kernel/kernel_config.h
r610 r611 128 128 #define DEBUG_QUEUELOCK_TYPE 0 // lock type (0 is undefined) 129 129 130 #define DEBUG_REMOTE_DIR 0 131 130 132 #define DEBUG_RPC_CLIENT_GENERIC 0 131 133 #define DEBUG_RPC_SERVER_GENERIC 0 … … 148 150 #define DEBUG_RPC_VFS_FILE_DESTROY 0 149 151 #define DEBUG_RPC_VFS_DEVICE_GET_DENTRY 0 152 #define DEBUG_RPC_VMM_CREATE_VSEG 0 150 153 #define DEBUG_RPC_VMM_GET_PTE 0 151 154 #define DEBUG_RPC_VMM_GET_VSEG 0 155 #define DEBUG_RPC_VMM_UNMAP_VSEG 0 152 156 153 157 #define DEBUG_RWLOCK_TYPE 0 // lock type (0 is undefined) … … 163 167 #define DEBUG_SYS_BARRIER 0 164 168 #define DEBUG_SYS_CLOSE 0 169 #define DEBUG_SYS_CLOSEDIR 0 165 170 #define DEBUG_SYS_CONDVAR 0 166 171 #define DEBUG_SYS_DISPLAY 0 … … 176 181 #define DEBUG_SYS_KILL 0 177 182 #define DEBUG_SYS_OPEN 0 178 #define DEBUG_SYS_MKDIR 2 183 #define DEBUG_SYS_OPENDIR 0 184 #define DEBUG_SYS_MKDIR 0 179 185 #define DEBUG_SYS_MMAP 0 180 186 #define DEBUG_SYS_MUNMAP 0 181 187 #define DEBUG_SYS_MUTEX 0 182 188 #define DEBUG_SYS_READ 0 189 #define DEBUG_SYS_READDIR 0 183 190 #define DEBUG_SYS_SEM 0 184 191 #define DEBUG_SYS_STAT 0 … … 207 214 208 215 #define DEBUG_VFS_ADD_CHILD 0 216 #define DEBUG_VFS_ADD_SPECIAL 1 217 #define DEBUG_VFS_CHDIR 0 209 218 #define DEBUG_VFS_CLOSE 0 210 #define DEBUG_VFS_CHDIR 0211 219 #define DEBUG_VFS_DENTRY_CREATE 0 212 220 #define DEBUG_VFS_FILE_CREATE 0 … … 215 223 #define DEBUG_VFS_INODE_LOAD_ALL 0 216 224 #define DEBUG_VFS_LINK 0 217 #define DEBUG_VFS_LOOKUP 1225 #define DEBUG_VFS_LOOKUP 0 218 226 #define DEBUG_VFS_LSEEK 0 219 #define DEBUG_VFS_MKDIR 1227 #define DEBUG_VFS_MKDIR 0 220 228 #define DEBUG_VFS_NEW_CHILD_INIT 0 221 229 #define DEBUG_VFS_OPEN 0 … … 224 232 225 233 #define DEBUG_VMM_CREATE_VSEG 0 234 #define DEBUG_VMM_DELETE_VSEG 0 226 235 #define DEBUG_VMM_DESTROY 0 227 236 #define DEBUG_VMM_FORK_COPY 0 … … 233 242 #define DEBUG_VMM_PAGE_ALLOCATE 0 234 243 #define DEBUG_VMM_SET_COW 0 235 #define DEBUG_VMM_UNMAP_VSEG 0236 244 #define DEBUG_VMM_UPDATE_PTE 
0 237 245 … … 276 284 #define LOCK_PROCESS_FDARRAY 27 // remote (Q) protect array of open files in owner process 277 285 #define LOCK_FATFS_FREE 28 // remote (Q) protect the FATFS context (free clusters) 278 279 #define LOCK_PROCESS_THTBL 29 // local (RW) protect local array of threads in a process 280 281 #define LOCK_MAPPER_STATE 30 // remote (RW) protect mapper state 282 #define LOCK_VFS_SIZE 31 // remote (RW) protect inode state and associated mapper 283 #define LOCK_VFS_FILE 32 // remote (RW) protect file descriptor state 284 #define LOCK_VMM_VSL 33 // remote (RW) protect VSL (local list of vsegs) 285 #define LOCK_VMM_GPT 34 // remote (RW) protect GPT (local page table) 286 #define LOCK_VFS_MAIN 35 // remote (RW) protect vfs traversal (in root inode) 286 #define LOCK_PROCESS_DIR 29 // remote (Q) protect xlist of open directories in process 287 288 #define LOCK_PROCESS_THTBL 30 // local (RW) protect local array of threads in a process 289 290 #define LOCK_MAPPER_STATE 31 // remote (RW) protect mapper state 291 #define LOCK_VFS_SIZE 32 // remote (RW) protect inode state and associated mapper 292 #define LOCK_VFS_FILE 33 // remote (RW) protect file descriptor state 293 #define LOCK_VMM_VSL 34 // remote (RW) protect VSL (local list of vsegs) 294 #define LOCK_VMM_GPT 35 // remote (RW) protect GPT (local page table) 295 #define LOCK_VFS_MAIN 36 // remote (RW) protect vfs traversal (in root inode) 287 296 288 297 … … 338 347 339 348 #define CONFIG_VFS_MAX_INODES 128 // max number of inodes per cluster 340 #define CONFIG_VFS_MAX_NAME_LENGTH 32// dentry name max length (bytes)349 #define CONFIG_VFS_MAX_NAME_LENGTH 56 // dentry name max length (bytes) 341 350 #define CONFIG_VFS_MAX_PATH_LENGTH 256 // pathname max length (bytes) 342 351 #define CONFIG_VFS_FREE_CLUSTERS_MIN 32 // min number of free clusters 343 352 #define CONFIG_VFS_MAX_DENTRIES 63 // max number of dentries in one dir 344 353 345 354 #define CONFIG_VFS_ROOT_IS_FATFS 1 // root FS is FATFS if non zero … … 
397 406 398 407 //////////////////////////////////////////////////////////////////////////////////////////// 399 // USER SPACE SEGMENTATION / all values are number of pages408 // USER SPACE SEGMENTATION / all values are numbers of pages 400 409 //////////////////////////////////////////////////////////////////////////////////////////// 401 410 -
trunk/kernel/libk/remote_mutex.c
r581 r611 137 137 #if DEBUG_MUTEX 138 138 thread_t * this = CURRENT_THREAD; 139 if( (uint32_t)hal_get_cycles() > DEBUG_ QUEUELOCK)139 if( (uint32_t)hal_get_cycles() > DEBUG_MUTEX ) 140 140 printk("\n[DBG] %s : thread %x in %x process / mutex(%x,%x)\n", 141 141 __FUNCTION__, this->trdid, this->process->pid, local_cxy, mutex_ptr ); -
trunk/kernel/libk/remote_mutex.h
r581 r611 38 38 * This user type is implemented as an unsigned long, but the value is not used by the 39 39 * kernel. ALMOS-MKH uses only the mutex virtual address as an identifier. 40 * For each user mutex, ALMOS-MKH creates a kernel "remote_mutex_t" structure, 41 * dynamically allocated in the reference cluster by the remote_mutex_create() function,42 * anddestroyed by the remote_mutex_destroy() function, using RPC if the calling thread40 * For each user mutex, ALMOS-MKH creates a kernel "remote_mutex_t" structure, allocated 41 * in the user process reference cluster by the remote_mutex_create() function, and 42 * destroyed by the remote_mutex_destroy() function, using RPC if the calling thread 43 43 * is not running in the reference cluster. 44 44 * -
trunk/kernel/libk/string.h
r457 r611 49 49 /******************************************************************************************** 50 50 * This function compares lexicographically the strind s1 and s2. 51 * characters are considered unsigned.51 * Characters are considered unsigned. 52 52 * I does not compare characters after the first NUL character. 53 53 ******************************************************************************************** … … 61 61 /******************************************************************************************** 62 62 * This function compares lexicographically the strind s1 and s2. 63 * I does not compare than <n> characters and stops after the first NUL character.63 * I does not compare more than <n> characters and stops after the first NUL character. 64 64 ******************************************************************************************** 65 65 * @ s1 : pointer on string. -
trunk/kernel/libk/xhtab.h
r610 r611 32 32 33 33 /////////////////////////////////////////////////////////////////////////////////////////// 34 // This file define a generic, embedded, remotely accessible hash table.34 // This file define a generic, embedded, remotely accessible, hash table. 35 35 // 36 36 // It can be accessed by any thread, running in any cluster. … … 39 39 // For this purpose the set of all registered items is split in several subsets. 40 40 // Each subset is organised as an embedded double linked xlists. 41 // - an item is uniquely identified by a <key>, that is a single uint32_t value. 42 // - From the <key> value, the hash table uses an item type specific xhtab_index() 41 // - an item is uniquely identified by a <key>, that is a item specific pointer, 42 // that can be a - for example - a char* defining the item "name". 43 // - From the <key> value, the hash table uses an item type specific index_from_key() 43 44 // function, to compute an <index> value, defining a subset of registered items. 44 45 // - to discriminate between items that have the same <index>, the hash table makes 45 // an associative search on the key in subset. 46 // an associative search on the key in subset, using the item type specific 47 // item_match_key() function. 46 48 // - Each registered item is a structure, that must contain an embedded xlist_entry, 47 49 // that is part of the xlist implementing the subset. 48 50 // 49 51 // For all registered items, a total order is defined by the increasing index values, 50 // and for each index value, by the position in the partial xlist.52 // and for each index value, by the position in the xlist implementing a subset. 51 53 // This order is used by the two functions xhtab_get_first() and xhtab_get_next(), that 52 54 // are used to scan all registered items. The two "current_index" and "current_xlist_xp" -
trunk/kernel/mm/kmem.c
r577 r611 40 40 #include <fatfs.h> 41 41 #include <ramfs.h> 42 #include <remote_dir.h> 42 43 #include <remote_sem.h> 43 44 #include <remote_barrier.h> … … 100 101 else if( type == KMEM_CONDVAR ) return sizeof( remote_condvar_t ); 101 102 else if( type == KMEM_MUTEX ) return sizeof( remote_mutex_t ); 103 else if( type == KMEM_DIR ) return sizeof( remote_dir_t ); 104 102 105 else if( type == KMEM_512_BYTES ) return 512; 103 106 … … 128 131 else if( type == KMEM_CONDVAR ) return "KMEM_CONDVAR"; 129 132 else if( type == KMEM_MUTEX ) return "KMEM_MUTEX"; 133 else if( type == KMEM_DIR ) return "KMEM_DIR"; 134 130 135 else if( type == KMEM_512_BYTES ) return "KMEM_512_BYTES"; 131 136 … … 144 149 145 150 #if DEBUG_KMEM 151 thread_t * this = CURRENT_THREAD; 146 152 uint32_t cycle = (uint32_t)hal_get_cycles(); 147 153 if( DEBUG_KMEM < cycle ) 148 printk("\n[ DBG] %s : thread %xenter / KCM type %s missing in cluster %x / cycle %d\n",149 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle );154 printk("\n[%s] thread[%x,%x] enter / KCM type %s missing in cluster %x / cycle %d\n", 155 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str( type ), local_cxy, cycle ); 150 156 #endif 151 157 … … 174 180 cycle = (uint32_t)hal_get_cycles(); 175 181 if( DEBUG_KMEM < cycle ) 176 printk("\n[ DBG] %s : thread %xexit / cycle %d\n",177 __FUNCTION__, CURRENT_THREAD, cycle );182 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 183 __FUNCTION__, this->process->pid, this->trdid, cycle ); 178 184 #endif 179 185 … … 198 204 199 205 #if DEBUG_KMEM 206 thread_t * this = CURRENT_THREAD; 200 207 uint32_t cycle = (uint32_t)hal_get_cycles(); 201 208 if( DEBUG_KMEM < cycle ) 202 printk("\n[DBG] %s : thread %x enter / type %s / cluster %x / cycle %d\n", 203 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), local_cxy, cycle ); 209 printk("\n[%s] thread [%x,%x] enter / %s / size %d / cluster %x / cycle %d\n", 210 __FUNCTION__, this->process->pid, this->trdid, 211 kmem_type_str( 
type ), size, local_cxy, cycle ); 204 212 #endif 205 213 … … 222 230 cycle = (uint32_t)hal_get_cycles(); 223 231 if( DEBUG_KMEM < cycle ) 224 printk("\n[DBG] %s : thread %x exit / %d page(s) allocated / ppn %x / cycle %d\n", 225 __FUNCTION__, CURRENT_THREAD, 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle ); 232 printk("\n[%s] thread[%x,%x] exit / %d page(s) allocated / ppn %x / cycle %d\n", 233 __FUNCTION__, this->process->pid, this->trdid, 234 1<<size, ppm_page2ppn(XPTR(local_cxy,ptr)), cycle ); 226 235 #endif 227 236 … … 244 253 cycle = (uint32_t)hal_get_cycles(); 245 254 if( DEBUG_KMEM < cycle ) 246 printk("\n[DBG] %s : thread %x exit / type %s allocated / base %x / size %d / cycle %d\n", 247 __FUNCTION__, CURRENT_THREAD, kmem_type_str( type ), (intptr_t)ptr, size, cycle ); 255 printk("\n[%s] thread[%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n", 256 __FUNCTION__, this->process->pid, this->trdid, 257 kmem_type_str( type ), (intptr_t)ptr, size, cycle ); 248 258 #endif 249 259 … … 286 296 cycle = (uint32_t)hal_get_cycles(); 287 297 if( DEBUG_KMEM < cycle ) 288 printk("\n[ DBG] %s : thread %xexit / type %s allocated / base %x / size %d / cycle %d\n",289 __FUNCTION__, CURRENT_THREAD, kmem_type_str(type), (intptr_t)ptr,298 printk("\n[%s] thread [%x,%x] exit / type %s allocated / base %x / size %d / cycle %d\n", 299 __FUNCTION__, this->process->pid, this->trdid, kmem_type_str(type), (intptr_t)ptr, 290 300 kmem_type_size(type), cycle ); 291 301 #endif -
trunk/kernel/mm/kmem.h
r567 r611 36 36 enum 37 37 { 38 KMEM_PAGE = 0, /*! reserved for PPM allocator*/39 KMEM_GENERIC = 1, /*! reserved for KHM allocator*/40 KMEM_KCM = 2, /*! kcm_t*/41 KMEM_VSEG = 3, /*! vseg_t*/42 KMEM_DEVICE = 4, /*! device_t*/43 KMEM_MAPPER = 5, /*! mapper_t*/44 KMEM_PROCESS = 6, /*! process_t*/45 KMEM_CPU_CTX = 7, /*! hal_cpu_context_t*/46 KMEM_FPU_CTX = 8, /*! hal_fpu_context_t*/47 KMEM_BARRIER = 9, /*! remote_barrier_t*/38 KMEM_PAGE = 0, /*! reserved for PPM allocator */ 39 KMEM_GENERIC = 1, /*! reserved for KHM allocator */ 40 KMEM_KCM = 2, /*! kcm_t */ 41 KMEM_VSEG = 3, /*! vseg_t */ 42 KMEM_DEVICE = 4, /*! device_t */ 43 KMEM_MAPPER = 5, /*! mapper_t */ 44 KMEM_PROCESS = 6, /*! process_t */ 45 KMEM_CPU_CTX = 7, /*! hal_cpu_context_t */ 46 KMEM_FPU_CTX = 8, /*! hal_fpu_context_t */ 47 KMEM_BARRIER = 9, /*! remote_barrier_t */ 48 48 49 KMEM_DEVFS_CTX = 10, /*! fatfs_inode_t*/50 KMEM_FATFS_CTX = 11, /*! fatfs_ctx_t*/51 KMEM_VFS_CTX = 12, /*! vfs_context_t*/52 KMEM_VFS_INODE = 13, /*! vfs_inode_t*/53 KMEM_VFS_DENTRY = 14, /*! vfs_dentry_t*/54 KMEM_VFS_FILE = 15, /*! vfs_file_t*/55 KMEM_SEM = 16, /*! remote_sem_t*/56 KMEM_CONDVAR = 17, /*! remote_condvar_t*/57 KMEM_MUTEX = 18, /*! remote_mutex_t*/58 KMEM_512_BYTES = 19, /*! 512 bytes aligned*/49 KMEM_DEVFS_CTX = 10, /*! fatfs_inode_t */ 50 KMEM_FATFS_CTX = 11, /*! fatfs_ctx_t */ 51 KMEM_VFS_CTX = 12, /*! vfs_context_t */ 52 KMEM_VFS_INODE = 13, /*! vfs_inode_t */ 53 KMEM_VFS_DENTRY = 14, /*! vfs_dentry_t */ 54 KMEM_VFS_FILE = 15, /*! vfs_file_t */ 55 KMEM_SEM = 16, /*! remote_sem_t */ 56 KMEM_CONDVAR = 17, /*! remote_condvar_t */ 57 KMEM_MUTEX = 18, /*! remote_mutex_t */ 58 KMEM_DIR = 19, /*! remote_dir_t */ 59 59 60 KMEM_TYPES_NR = 21, 60 KMEM_512_BYTES = 20, /*! 512 bytes aligned */ 61 62 KMEM_TYPES_NR = 21, 61 63 }; 62 64 -
trunk/kernel/mm/mapper.c
r610 r611 644 644 } // end mapper_remote_set_32() 645 645 646 646 ////////////////////////////////////////////////// 647 error_t mapper_display_page( xptr_t mapper_xp, 648 uint32_t page_id, 649 uint32_t nbytes, 650 char * string ) 651 { 652 xptr_t page_xp; // extended pointer on page descriptor 653 xptr_t base_xp; // extended pointer on page base 654 char buffer[4096]; // local buffer 655 uint32_t * tab; // pointer on uint32_t to scan the buffer 656 uint32_t line; // line index 657 uint32_t word; // word index 658 659 if( nbytes > 4096) 660 { 661 printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n", 662 __FUNCTION__, nbytes ); 663 return -1; 664 } 665 666 // get extended pointer on page descriptor 667 page_xp = mapper_remote_get_page( mapper_xp , page_id ); 668 669 if( page_xp == XPTR_NULL) 670 { 671 printk("\n[ERROR] in %s : cannot access page %d in mapper\n", 672 __FUNCTION__, page_id ); 673 return -1; 674 } 675 676 // get extended pointer on page base 677 base_xp = ppm_page2base( page_xp ); 678 679 // copy remote page to local buffer 680 hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes ); 681 682 // display 8 words per line 683 tab = (uint32_t *)buffer; 684 printk("\n***** %s : first %d bytes of page %d *****\n", string, nbytes, page_id ); 685 for( line = 0 ; line < (nbytes >> 5) ; line++ ) 686 { 687 printk("%X : ", line ); 688 for( word = 0 ; word < 8 ; word++ ) printk("%X ", tab[(line<<3) + word] ); 689 printk("\n"); 690 } 691 692 return 0; 693 694 } // end mapper_display_page 695 696 -
trunk/kernel/mm/mapper.h
r610 r611 1 1 /* 2 * mapper.h - Kernel cache for FS files ordirectories definition.2 * mapper.h - Kernel cache for VFS files/directories definition. 3 3 * 4 4 * Authors Mohamed Lamine Karaoui (2015) … … 195 195 196 196 /******************************************************************************************* 197 * This function returns an extended pointer on a mapper page, identified by <page_id>, 198 * index in the file. The - possibly remote - mapper is identified by the <mapper_xp> 199 * argument. It can be executed by a thread running in any cluster, as it uses remote 197 * This function returns an extended pointer on a page descriptor. 198 * The - possibly remote - mapper is identified by the <mapper_xp> argument. 199 * The page is identified by <page_id> argument (page index in the file). 200 * It can be executed by a thread running in any cluster, as it uses remote 200 201 * access primitives to scan the mapper. 201 202 * In case of miss, this function takes the mapper lock in WRITE_MODE, and call the … … 205 206 * @ mapper_xp : extended pointer on the mapper. 206 207 * @ page_id : page index in file 207 * @ returns extended pointer on page baseif success / return XPTR_NULL if error.208 * @ returns extended pointer on page descriptor if success / return XPTR_NULL if error. 208 209 ******************************************************************************************/ 209 210 xptr_t mapper_remote_get_page( xptr_t mapper_xp, … … 212 213 /******************************************************************************************* 213 214 * This function allows to read a single word in a mapper seen as and array of uint32_t. 214 * It has bee designed to support remote access t ho the FAT mapper of the FATFS.215 * It has bee designed to support remote access to the FAT mapper of the FATFS. 215 216 * It can be called by any thread running in any cluster. 
216 217 * In case of miss, it takes the mapper lock in WRITE_MODE, load the missing … … 218 219 ******************************************************************************************* 219 220 * @ mapper_xp : [in] extended pointer on the mapper. 220 * @ index: [in] 32 bits word index in file.221 * @ word_id : [in] 32 bits word index in file. 221 222 * @ p_value : [out] local pointer on destination buffer. 222 223 * @ returns 0 if success / return -1 if error. … … 234 235 ******************************************************************************************* 235 236 * @ mapper_xp : [in] extended pointer on the mapper. 236 * @ index: [in] 32 bits word index in file.237 * @ p_value: [in] value to be written.237 * @ word_id : [in] 32 bits word index in file. 238 * @ value : [in] value to be written. 238 239 * @ returns 0 if success / return -1 if error. 239 240 ******************************************************************************************/ … … 242 243 uint32_t value ); 243 244 245 /******************************************************************************************* 246 * This debug function displays the content of a given page of a given mapper. 247 * - the mapper is identified by the <mapper_xp> argument. 248 * - the page is identified by the <page_id> argument. 249 * - the number of bytes to display in page is defined by the <nbytes> argument. 250 * The format is eigth (32 bits) words per line in hexadecimal. 251 * It can be called by any thread running in any cluster. 252 * In case of miss in mapper, it load the missing page from device to mapper. 253 ******************************************************************************************* 254 * @ mapper_xp : [in] extended pointer on the mapper. 255 * @ page_id : [in] page index in file. 256 * @ nbytes : [in] value to be written. 257 * @ string : [in] string printed in header. 258 * @ returns 0 if success / return -1 if error. 
259 ******************************************************************************************/ 260 error_t mapper_display_page( xptr_t mapper_xp, 261 uint32_t page_id, 262 uint32_t nbytes, 263 char * string ); 264 265 244 266 #endif /* _MAPPER_H_ */ -
trunk/kernel/mm/ppm.c
r610 r611 210 210 211 211 #if DEBUG_PPM_ALLOC_PAGES 212 thread_t * this = CURRENT_THREAD; 212 213 uint32_t cycle = (uint32_t)hal_get_cycles(); 213 214 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 214 printk("\n[ DBG] in %s : thread %x in process %xenter for %d page(s) / cycle %d\n",215 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );215 printk("\n[%s] thread[%x,%x] enter for %d page(s) / cycle %d\n", 216 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle ); 216 217 #endif 217 218 218 219 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 219 220 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 220 ppm_print( );221 ppm_print("enter ppm_alloc_pages"); 221 222 #endif 222 223 223 224 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 224 225 225 assert( (order < CONFIG_PPM_MAX_ORDER) , 226 "illegal order argument = %x\n" , order );226 // check order 227 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 227 228 228 229 page_t * block = NULL; … … 250 251 cycle = (uint32_t)hal_get_cycles(); 251 252 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 252 printk("\n[ DBG] in %s : thread %x in process %xcannot allocate %d page(s) / cycle %d\n",253 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, 1<<order, cycle );253 printk("\n[%s] thread[%x,%x] cannot allocate %d page(s) / cycle %d\n", 254 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cycle ); 254 255 #endif 255 256 … … 289 290 cycle = (uint32_t)hal_get_cycles(); 290 291 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 291 printk("\n[ DBG] in %s : thread %x in process %xexit for %d page(s) / ppn = %x / cycle %d\n",292 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,292 printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn = %x / cycle %d\n", 293 __FUNCTION__, this->process->pid, this->trdid, 293 294 1<<order, ppm_page2ppn(XPTR( local_cxy , block )), cycle ); 295 #endif 296 297 #if(DEBUG_PPM_ALLOC_PAGES & 0x1) 298 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 299 ppm_print("exit 
ppm_alloc_pages"); 294 300 #endif 295 301 … … 307 313 uint32_t cycle = (uint32_t)hal_get_cycles(); 308 314 if( DEBUG_PPM_FREE_PAGES < cycle ) 309 printk("\n[ DBG] in %s : thread %x in process %xenter for %d page(s) / ppn %x / cycle %d\n",310 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,315 printk("\n[%s] thread[%x,%x] enter for %d page(s) / ppn %x / cycle %d\n", 316 __FUNCTION__, this->process->pid, this->trdid, 311 317 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle ); 312 318 #endif … … 314 320 #if(DEBUG_PPM_FREE_PAGES & 0x1) 315 321 if( DEBUG_PPM_FREE_PAGES < cycle ) 316 ppm_print( );322 ppm_print("enter ppm_free_pages"); 317 323 #endif 318 324 … … 331 337 cycle = (uint32_t)hal_get_cycles(); 332 338 if( DEBUG_PPM_FREE_PAGES < cycle ) 333 printk("\n[ DBG] in %s : thread %x in process %xexit for %d page(s) / ppn %x / cycle %d\n",334 __FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,339 printk("\n[%s] thread[%x,%x] exit for %d page(s) / ppn %x / cycle %d\n", 340 __FUNCTION__, this->process->pid, this->trdid, 335 341 1<<page->order, ppm_page2ppn(XPTR(local_cxy , page)), cycle ); 336 342 #endif 337 343 344 #if(DEBUG_PPM_FREE_PAGES & 0x1) 345 if( DEBUG_PPM_FREE_PAGES < cycle ) 346 ppm_print("exit ppm_free_pages"); 347 #endif 348 338 349 } // end ppm_free_pages() 339 350 340 ////////////////////// 341 void ppm_print( void)351 /////////////////////////////// 352 void ppm_print( char * string ) 342 353 { 343 354 uint32_t order; … … 350 361 busylock_acquire( &ppm->free_lock ); 351 362 352 printk("\n*** PPM in cluster %x : %d pages ***\n", local_cxy , ppm->pages_nr ); 363 printk("\n*** PPM in cluster %x / %s / %d pages ***\n", 364 local_cxy , string, ppm->pages_nr ); 353 365 354 366 for( order = 0 ; order < CONFIG_PPM_MAX_ORDER ; order++ ) … … 413 425 xptr_t dirty_lock_xp = XPTR( page_cxy , &ppm->dirty_lock ); 414 426 415 // printk("\n@@@ %s : before dirty_list lock aquire\n", __FUNCTION__ );416 417 427 // lock the remote 
PPM dirty_list 418 428 remote_queuelock_acquire( dirty_lock_xp ); 419 429 420 // printk("\n@@@ %s : after dirty_list lock aquire\n", __FUNCTION__ );421 422 430 // lock the remote page 423 431 remote_busylock_acquire( page_lock_xp ); 424 425 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );426 432 427 433 // get remote page flags … … 466 472 } 467 473 468 // printk("\n@@@ %s : before page lock release\n", __FUNCTION__ );469 470 474 // unlock the remote page 471 475 remote_busylock_release( page_lock_xp ); 472 476 473 // printk("\n@@@ %s : after page lock release\n", __FUNCTION__ );474 475 477 // unlock the remote PPM dirty_list 476 478 remote_queuelock_release( dirty_lock_xp ); 477 478 // printk("\n@@@ %s : after page lock aquire\n", __FUNCTION__ );479 479 480 480 return done; -
trunk/kernel/mm/ppm.h
r610 r611 83 83 * This is the low-level physical pages allocation function. 84 84 * It allocates N contiguous physical pages. N is a power of 2. 85 * In normal use, you don't need to call itdirectly, as the recommended way to get85 * In normal use, it should not be called directly, as the recommended way to get 86 86 * physical pages is to call the generic allocator defined in kmem.h. 87 87 ***************************************************************************************** 88 88 * @ order : ln2( number of 4 Kbytes pages) 89 89 * @ returns a pointer on the page descriptor if success / NULL otherwise 90 ************************************************************************************** à))**/90 ****************************************************************************************/ 91 91 page_t * ppm_alloc_pages( uint32_t order ); 92 92 … … 174 174 /***************************************************************************************** 175 175 * This function prints the PPM allocator status in the calling thread cluster. 176 ****************************************************************************************/ 177 void ppm_print( void ); 176 ***************************************************************************************** 177 * string : character string printed in header 178 ****************************************************************************************/ 179 void ppm_print( char * string ); 178 180 179 181 /***************************************************************************************** -
trunk/kernel/mm/vmm.c
r610 r611 1 1 /* 2 * vmm.c - virtual memory manager related operations interface.2 * vmm.c - virtual memory manager related operations definition. 3 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011, 2012) … … 254 254 } // vmm_display() 255 255 256 /////////////////////////////////// 257 void vmm_ vseg_attach( vmm_t * vmm,258 vseg_t * vseg )256 ////////////////////////////////////////// 257 void vmm_attach_vseg_to_vsl( vmm_t * vmm, 258 vseg_t * vseg ) 259 259 { 260 260 // build extended pointer on rwlock protecting VSL … … 275 275 } 276 276 277 /////////////////////////////////// 278 void vmm_ vseg_detach( vmm_t * vmm,279 vseg_t * vseg )277 //////////////////////////////////////////// 278 void vmm_detach_vseg_from_vsl( vmm_t * vmm, 279 vseg_t * vseg ) 280 280 { 281 // get vseg type 282 uint32_t type = vseg->type; 283 281 284 // build extended pointer on rwlock protecting VSL 282 285 xptr_t lock_xp = XPTR( local_cxy , &vmm->vsegs_lock ); … … 288 291 vseg->vmm = NULL; 289 292 290 // remove vseg from vmm list293 // remove vseg from VSL 291 294 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 292 295 293 296 // release rwlock in write mode 294 297 remote_rwlock_wr_release( lock_xp ); 295 } 298 299 // release the stack slot to VMM stack allocator if STACK type 300 if( type == VSEG_TYPE_STACK ) 301 { 302 // get pointer on stack allocator 303 stack_mgr_t * mgr = &vmm->stack_mgr; 304 305 // compute slot index 306 uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE); 307 308 // update stacks_bitmap 309 busylock_acquire( &mgr->lock ); 310 bitmap_clear( &mgr->bitmap , index ); 311 busylock_release( &mgr->lock ); 312 } 313 314 // release the vseg to VMM mmap allocator if MMAP type 315 if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) ) 316 { 317 // get pointer on mmap allocator 318 mmap_mgr_t * mgr = &vmm->mmap_mgr; 319 320 // compute zombi_list index 321 uint32_t index = bits_log2( vseg->vpn_size ); 322 
323 // update zombi_list 324 busylock_acquire( &mgr->lock ); 325 list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); 326 busylock_release( &mgr->lock ); 327 } 328 329 // release physical memory allocated for vseg descriptor if no MMAP type 330 if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) ) 331 { 332 vseg_free( vseg ); 333 } 334 335 } // end vmm_remove_vseg_from_vsl() 296 336 297 337 //////////////////////////////////////////////// … … 616 656 617 657 // register child vseg in child VSL 618 vmm_ vseg_attach( child_vmm , child_vseg );658 vmm_attach_vseg_to_vsl( child_vmm , child_vseg ); 619 659 620 660 #if DEBUG_VMM_FORK_COPY … … 759 799 xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root ); 760 800 761 // remove all user vsegs registered in VSL 801 // scan the VSL to delete all registered vsegs 802 // (don't use a FOREACH for item deletion in xlist) 762 803 while( !xlist_is_empty( root_xp ) ) 763 804 { … … 766 807 vseg = GET_PTR( vseg_xp ); 767 808 768 // unmap and release physical pages 769 vmm_unmap_vseg( process , vseg ); 770 771 // remove vseg from VSL 772 vmm_vseg_detach( vmm , vseg ); 773 774 // release memory allocated to vseg descriptor 775 vseg_free( vseg ); 809 // delete vseg and release physical pages 810 vmm_delete_vseg( process->pid , vseg->min ); 776 811 777 812 #if( DEBUG_VMM_DESTROY & 1 ) 778 813 if( DEBUG_VMM_DESTROY < cycle ) 779 printk("\n[%s] %s vseg released / vpn_base %x / vpn_size %d\n",814 printk("\n[%s] %s vseg deleted / vpn_base %x / vpn_size %d\n", 780 815 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); 781 816 #endif … … 796 831 __FUNCTION__ , vseg_type_str( vseg->type ), vseg->vpn_base, vseg->vpn_size ); 797 832 #endif 798 vmm_vseg_detach( vmm , vseg ); 833 // clean vseg descriptor 834 vseg->vmm = NULL; 835 836 // remove vseg from xlist 837 xlist_unlink( XPTR( local_cxy , &vseg->xlist ) ); 838 839 // release vseg descriptor 799 840 vseg_free( vseg ); 800 841 … 
… 1079 1120 1080 1121 // attach vseg to VSL 1081 vmm_ vseg_attach( vmm , vseg );1122 vmm_attach_vseg_to_vsl( vmm , vseg ); 1082 1123 1083 1124 #if DEBUG_VMM_CREATE_VSEG … … 1092 1133 } // vmm_create_vseg() 1093 1134 1094 ///////////////////////////////////// 1095 void vmm_remove_vseg( vseg_t * vseg ) 1135 /////////////////////////////////// 1136 void vmm_delete_vseg( pid_t pid, 1137 intptr_t vaddr ) 1096 1138 { 1097 // get pointers on calling process and VMM 1098 thread_t * this = CURRENT_THREAD; 1099 vmm_t * vmm = &this->process->vmm; 1100 uint32_t type = vseg->type; 1101 1102 // detach vseg from VSL 1103 vmm_vseg_detach( vmm , vseg ); 1104 1105 // release the stack slot to VMM stack allocator if STACK type 1106 if( type == VSEG_TYPE_STACK ) 1107 { 1108 // get pointer on stack allocator 1109 stack_mgr_t * mgr = &vmm->stack_mgr; 1110 1111 // compute slot index 1112 uint32_t index = ((vseg->vpn_base - mgr->vpn_base - 1) / CONFIG_VMM_STACK_SIZE); 1113 1114 // update stacks_bitmap 1115 busylock_acquire( &mgr->lock ); 1116 bitmap_clear( &mgr->bitmap , index ); 1117 busylock_release( &mgr->lock ); 1118 } 1119 1120 // release the vseg to VMM mmap allocator if MMAP type 1121 if( (type == VSEG_TYPE_ANON) || (type == VSEG_TYPE_FILE) || (type == VSEG_TYPE_REMOTE) ) 1122 { 1123 // get pointer on mmap allocator 1124 mmap_mgr_t * mgr = &vmm->mmap_mgr; 1125 1126 // compute zombi_list index 1127 uint32_t index = bits_log2( vseg->vpn_size ); 1128 1129 // update zombi_list 1130 busylock_acquire( &mgr->lock ); 1131 list_add_first( &mgr->zombi_list[index] , &vseg->zlist ); 1132 busylock_release( &mgr->lock ); 1133 } 1134 1135 // release physical memory allocated for vseg descriptor if no MMAP type 1136 if( (type != VSEG_TYPE_ANON) && (type != VSEG_TYPE_FILE) && (type != VSEG_TYPE_REMOTE) ) 1137 { 1138 vseg_free( vseg ); 1139 } 1140 } // end vmm_remove_vseg() 1141 1142 ///////////////////////////////////////// 1143 void vmm_unmap_vseg( process_t * process, 1144 vseg_t * vseg ) 1145 { 
1139 process_t * process; // local pointer on local process 1140 vmm_t * vmm; // local pointer on local process VMM 1141 vseg_t * vseg; // local pointer on local vseg containing vaddr 1142 gpt_t * gpt; // local pointer on local process GPT 1146 1143 vpn_t vpn; // VPN of current PTE 1147 1144 vpn_t vpn_min; // VPN of first PTE … … 1157 1154 uint32_t forks; // actual number of pendinf forks 1158 1155 1159 #if DEBUG_VMM_ UNMAP_VSEG1156 #if DEBUG_VMM_DELETE_VSEG 1160 1157 uint32_t cycle = (uint32_t)hal_get_cycles(); 1161 1158 thread_t * this = CURRENT_THREAD; 1162 if( DEBUG_VMM_UNMAP_VSEG < cycle ) 1163 printk("\n[%s] thread[%x,%x] enter / process %x / vseg %s / base %x / cycle %d\n", 1164 __FUNCTION__, this->process->pid, this->trdid, process->pid, 1165 vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); 1166 #endif 1167 1168 // get pointer on local GPT 1169 gpt_t * gpt = &process->vmm.gpt; 1170 1171 // loop on pages in vseg 1159 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1160 printk("\n[%s] thread[%x,%x] enter / process %x / vaddr %x / cycle %d\n", 1161 __FUNCTION__, this->process->pid, this->trdid, pid, vaddr, cycle ); 1162 #endif 1163 1164 // get local pointer on local process descriptor 1165 process = cluster_get_local_process_from_pid( pid ); 1166 1167 if( process == NULL ) return; 1168 1169 // get pointers on local process VMM an GPT 1170 vmm = &process->vmm; 1171 gpt = &process->vmm.gpt; 1172 1173 // get local pointer on vseg containing vaddr 1174 vseg = vmm_vseg_from_vaddr( vmm , vaddr ); 1175 1176 if( vseg == NULL ) return; 1177 1178 // loop to invalidate all vseg PTEs in GPT 1172 1179 vpn_min = vseg->vpn_base; 1173 1180 vpn_max = vpn_min + vseg->vpn_size; … … 1180 1187 { 1181 1188 1182 #if( DEBUG_VMM_ UNMAP_VSEG & 1 )1183 if( DEBUG_VMM_ UNMAP_VSEG < cycle )1184 printk("- vpn %x / ppn %x\n" , vpn , ppn);1189 #if( DEBUG_VMM_DELETE_VSEG & 1 ) 1190 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1191 printk("- unmap vpn %x / ppn %x / vseg %s \n" , vpn , ppn, 
vseg_type_str(vseg->type) ); 1185 1192 #endif 1186 1193 … … 1225 1232 rpc_pmem_release_pages_client( page_cxy , page_ptr ); 1226 1233 } 1234 1235 #if( DEBUG_VMM_DELETE_VSEG & 1 ) 1236 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1237 printk("- release ppn %x\n", ppn ); 1238 #endif 1227 1239 } 1228 1240 } … … 1230 1242 } 1231 1243 1232 #if DEBUG_VMM_UNMAP_VSEG 1244 // remove vseg from VSL and release vseg descriptor (if not MMAP) 1245 vmm_detach_vseg_from_vsl( vmm , vseg ); 1246 1247 #if DEBUG_VMM_DELETE_VSEG 1233 1248 cycle = (uint32_t)hal_get_cycles(); 1234 if( DEBUG_VMM_ UNMAP_VSEG < cycle )1249 if( DEBUG_VMM_DELETE_VSEG < cycle ) 1235 1250 printk("\n[%s] thread[%x,%x] exit / process %x / vseg %s / base %x / cycle %d\n", 1236 __FUNCTION__, this->process->pid, this->trdid, process->pid, 1237 vseg_type_str( vseg->type ), vseg->vpn_base, cycle ); 1238 #endif 1239 1240 } // end vmm_unmap_vseg() 1241 1242 ////////////////////////////////////////////////////////////////////////////////////////// 1243 // This low-level static function is called by the vmm_get_vseg(), vmm_get_pte(), 1244 // and vmm_resize_vseg() functions. It scan the local VSL to find the unique vseg 1245 // containing a given virtual address. 1246 ////////////////////////////////////////////////////////////////////////////////////////// 1247 // @ vmm : pointer on the process VMM. 1248 // @ vaddr : virtual address. 1249 // @ return vseg pointer if success / return NULL if not found. 
1250 ////////////////////////////////////////////////////////////////////////////////////////// 1251 static vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, 1252 intptr_t vaddr ) 1251 __FUNCTION__, this->process->pid, this->trdid, pid, vseg_type_str(vseg->type), vaddr, cycle ); 1252 #endif 1253 1254 } // end vmm_delete_vseg() 1255 1256 ///////////////////////////////////////////// 1257 vseg_t * vmm_vseg_from_vaddr( vmm_t * vmm, 1258 intptr_t vaddr ) 1253 1259 { 1254 1260 xptr_t iter_xp; … … 1310 1316 remote_rwlock_wr_acquire( lock_xp ); 1311 1317 1312 if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // regionnot included in vseg1313 { 1314 error = EINVAL;1315 } 1316 else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be removed1317 { 1318 vmm_ remove_vseg( vseg);1318 if( (vseg->min > addr_min) || (vseg->max < addr_max) ) // not included in vseg 1319 { 1320 error = -1; 1321 } 1322 else if( (vseg->min == addr_min) && (vseg->max == addr_max) ) // vseg must be deleted 1323 { 1324 vmm_delete_vseg( process->pid , vseg->min ); 1319 1325 error = 0; 1320 1326 } 1321 else if( vseg->min == addr_min ) // vseg must be resized1327 else if( vseg->min == addr_min ) // vseg must be resized 1322 1328 { 1323 1329 // update vseg base address … … 1331 1337 error = 0; 1332 1338 } 1333 else if( vseg->max == addr_max ) // vseg must be resized1339 else if( vseg->max == addr_max ) // vseg must be resized 1334 1340 { 1335 1341 // update vseg max address … … 1343 1349 error = 0; 1344 1350 } 1345 else // vseg cut in three regions1351 else // vseg cut in three regions 1346 1352 { 1347 1353 // resize existing vseg … … 1415 1421 vseg_init_from_ref( vseg , vseg_xp ); 1416 1422 1417 // register local vseg in local V MM1418 vmm_ vseg_attach( vmm , vseg );1423 // register local vseg in local VSL 1424 vmm_attach_vseg_to_vsl( vmm , vseg ); 1419 1425 } 1420 1426 -
trunk/kernel/mm/vmm.h
r610 r611 38 38 39 39 struct process_s; 40 struct vseg_s; 40 41 41 42 /********************************************************************************************* 42 43 * This structure defines the STACK allocator used by the VMM to dynamically handle 43 * a STACK vseg requested or released by an user process.44 * This allocator handles a fixed size array of fixed size slots in the STACK zone.44 * vseg allocation or release requests for an user thread. 45 * This allocator handles a fixed size array of fixed size slots in STACK zone of user space. 45 46 * The stack size and the number of slots are defined by the CONFIG_VMM_STACK_SIZE, and 46 47 * CONFIG_VMM_STACK_BASE parameters. 47 * Each slot can contain one user stack vseg. The first page in the slot is not allocated48 * to detect stack overflow.48 * Each slot can contain one user stack vseg. The first 4 Kbytes page in the slot is not 49 * mapped to detect stack overflow. 49 50 * The slot index can be computed form the slot base address, and reversely. 50 51 * All allocation / release operations are registered in the stack_bitmap, that completely 51 * define the STACK zone stat e.52 * define the STACK zone status. 
52 53 ********************************************************************************************/ 53 54 … … 159 160 160 161 /********************************************************************************************* 161 * This function adds a vseg descriptor in the VSL of a given VMM,162 * and updates the vmm field in the vseg descriptor.163 * It takes the lock protecting VSL.164 *********************************************************************************************165 * @ vmm : pointer on the VMM166 * @ vseg : pointer on the vseg descriptor167 ********************************************************************************************/168 void vmm_vseg_attach( struct vmm_s * vmm,169 vseg_t * vseg );170 171 /*********************************************************************************************172 * This function removes a vseg descriptor from the set of vsegs controlled by a given VMM,173 * and updates the vmm field in the vseg descriptor. No memory is released.174 * It takes the lock protecting VSL.175 *********************************************************************************************176 * @ vmm : pointer on the VMM177 * @ vseg : pointer on the vseg descriptor178 ********************************************************************************************/179 void vmm_vseg_detach( struct vmm_s * vmm,180 vseg_t * vseg );181 182 /*********************************************************************************************183 162 * This function is called by the process_make_fork() function. It partially copies 184 163 * the content of a remote parent process VMM to the local child process VMM: … … 235 214 236 215 /********************************************************************************************* 237 * This function unmaps from the local GPT all mapped PTEs of a vseg identified by the238 * <process> and <vseg> arguments. 
It can be used for any type of vseg.239 * If this function is executed in the reference cluster, it handles for each referenced240 * physical pages the pending forks counter :241 * - if counter is non-zero, it decrements it.242 * - if counter is zero, it releases the physical page to local kmem allocator.243 *********************************************************************************************244 * @ process : pointer on process descriptor.245 * @ vseg : pointer on the vseg to be unmapped.246 ********************************************************************************************/247 void vmm_unmap_vseg( struct process_s * process,248 vseg_t * vseg );249 250 /*********************************************************************************************251 216 * This function deletes, in the local cluster, all vsegs registered in the VSL 252 217 * of the process identified by the <process> argument. For each vseg: … … 254 219 * - it removes the vseg from the local VSL. 255 220 * - it releases the memory allocated to the local vseg descriptors. 256 * Finally,it releases the memory allocated to the GPT itself.221 * - it releases the memory allocated to the GPT itself. 257 222 ********************************************************************************************* 258 223 * @ process : pointer on process descriptor. … … 304 269 305 270 /********************************************************************************************* 306 * This function removes a vseg identified by it's pointer from the VMM of the calling process. 307 * - If the vseg has not the STACK or MMAP type, it is removed from the vsegs list, 308 * and the physical memory allocated to vseg descriptor is released to KMEM. 309 * - If the vseg has the STACK type, it is removed from the vsegs list, the physical memory 310 * allocated to vseg descriptor is released to KMEM, and the stack slot is returned to the 311 * VMM STACK allocator. 
312 * - If the vseg has the MMAP type, it is removed from the vsegs list and is registered 313 * in the zombi_list of the VMM MMAP allocator for future reuse. The physical memory 314 * allocated to vseg descriptor is NOT released to KMEM. 315 ********************************************************************************************* 316 * @ vseg : pointer on vseg to be removed. 317 ********************************************************************************************/ 318 void vmm_remove_vseg( vseg_t * vseg ); 271 * This function removes from the local VMM of a process descriptor identified by the <pid> 272 * argument a local vseg identified by its base address <vaddr> in user space. 273 * It can be used for any type of vseg, but must be called by a local thread. 274 * Use the RPC_VMM_DELETE_VSEG if the client thread is not local. 275 * It does nothing if the process is not registered in the local cluster. 276 * It does nothing if the vseg is not registered in the local process VSL. 277 * - It removes from the local GPT all registered PTEs. If it is executed in the reference 278 * cluster, it releases the referenced physical pages, to the relevant kmem allocator, 279 * depending on vseg type and the pending forks counter. 280 * - It removes the vseg from the local VSL, and release the vseg descriptor if not MMAP. 281 ********************************************************************************************* 282 * @ process : process identifier. 283 * @ vaddr : vseg base address in user space. 284 ********************************************************************************************/ 285 void vmm_delete_vseg( pid_t pid, 286 intptr_t vaddr ); 287 288 /********************************************************************************************* 289 * This function insert a new <vseg> descriptor in the VSL identifed by the <vmm> argument. 290 * and updates the vmm field in the vseg descriptor. 291 * It takes the lock protecting VSL. 
292 ********************************************************************************************* 293 * @ vmm : local pointer on local VMM. 294 * @ vseg : local pointer on local vseg descriptor. 295 ********************************************************************************************/ 296 void vmm_attach_vseg_to_vsl( vmm_t * vmm, 297 vseg_t * vseg ); 298 299 /********************************************************************************************* 300 * This function removes a vseg identified by the <vseg> argument from the local VSL 301 * identified by the <vmm> argument and releases the memory allocated to vseg descriptor, 302 * for all vseg types, BUT the MMAP type (i.e. ANON or REMOTE). 303 * - If the vseg has not the STACK or MMAP type, it is simply removed from the VSL, 304 * and vseg descriptor is released. 305 * - If the vseg has the STACK type, it is removed from VSL, vseg descriptor is released, 306 * and the stack slot is returned to the local VMM_STACK allocator. 307 * - If the vseg has the MMAP type, it is removed from VSL and is registered in zombi_list 308 * of the VMM_MMAP allocator for future reuse. The vseg descriptor is NOT released. 309 ********************************************************************************************* 310 * @ vmm : local pointer on local VMM. 311 * @ vseg : local pointer on local vseg to be removed. 312 ********************************************************************************************/ 313 void vmm_detach_vseg_from_vsl( vmm_t * vmm, 314 vseg_t * vseg ); 319 315 320 316 /********************************************************************************************* … 338 334 339 335 /********************************************************************************************* 336 * This low-level function scans the local VSL in <vmm> to find the unique vseg containing 337 * a given virtual address <vaddr>. 338 * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions. 
339 ********************************************************************************************* 340 * @ vmm : pointer on the process VMM. 341 * @ vaddr : virtual address. 342 * @ return vseg pointer if success / return NULL if not found. 343 ********************************************************************************************/ 344 struct vseg_s * vmm_vseg_from_vaddr( vmm_t * vmm, 345 intptr_t vaddr ); 346 347 /********************************************************************************************* 340 348 * This function checks that a given virtual address is contained in a registered vseg. 341 349 * It can be called by any thread running in any cluster: … … 344 352 * register it in local VMM and returns the local vseg pointer, if success. 345 353 * - it returns an user error if the vseg is missing in the reference VMM, or if there is 346 * not enough memory for a new vseg descriptor in cluster containing the calling thread.354 * not enough memory for a new vseg descriptor in the calling thread cluster. 347 355 ********************************************************************************************* 348 356 * @ process : [in] pointer on process descriptor … … 350 358 * @ vseg : [out] local pointer on local vseg 351 359 * @ returns 0 if success / returns -1 if user error (out of segment). 352 ******************************************************************************************** */360 ********************************************************************************************/ 353 361 error_t vmm_get_vseg( struct process_s * process, 354 362 intptr_t vaddr, -
trunk/kernel/mm/vseg.h
r595 r611 71 71 typedef struct vseg_s 72 72 { 73 xlist_entry_t xlist; /*! all vsegs in same VSL (or same zombi list)*/73 xlist_entry_t xlist; /*! all vsegs in same VSL */ 74 74 list_entry_t zlist; /*! all vsegs in same zombi list */ 75 75 struct vmm_s * vmm; /*! pointer on associated VM manager */ -
trunk/kernel/syscalls/shared_include/shared_almos.h
r580 r611 52 52 DISPLAY_DQDT = 7, 53 53 DISPLAY_BUSYLOCKS = 8, 54 DISPLAY_MAPPER = 9, 54 55 } 55 56 display_type_t; -
trunk/kernel/syscalls/shared_include/shared_dirent.h
r445 r611 26 26 27 27 /******************************************************************************************* 28 * Th ese two structure defines the informations returned to user by the opendir()29 * function, used by the readdir() function, and released by the closedir() function.30 * - "DIR" describes the complete directory.31 * - "dirent" describes one directory entry.28 * This enum defines the possible types for a dirent inode in a dirent structure. 29 * 30 * WARNING : these types must be kept consistent with inode types in <vfs.h> file. 31 * and with types in <shared_stat.h> file. 32 32 ******************************************************************************************/ 33 33 34 #define DIRENT_NAME_MAX_LENGTH 56 35 #define DIRENT_MAX_NUMBER 63 34 typedef enum 35 { 36 DT_REG = 0, /*! regular file */ 37 DT_DIR = 1, /*! directory */ 38 DT_FIFO = 2, /*! named pipe (FIFO) */ 39 DT_PIPE = 3, /*! anonymous pipe */ 40 DT_SOCK = 4, /*! socket */ 41 DT_CHR = 5, /*! character device */ 42 DT_BLK = 6, /*! block device */ 43 DT_LNK = 7, /*! symbolic link */ 44 DT_UNKNOWN = 8, /*! undetermined type */ 45 } 46 dirent_type_t; 47 48 /******************************************************************************************* 49 * This defines the actual ALMOS-MKH implementation of the DIR user type. 50 ******************************************************************************************/ 51 52 typedef unsigned int DIR; 53 54 /******************************************************************************************* 55 * This structure defines the informations returned to user by the readdir() syscall. 56 * 57 * WARNING: sizeof(dirent) must be 64 bytes. 58 ******************************************************************************************/ 36 59 37 60 struct dirent 38 61 { 39 unsigned int inum; /*! inode identifier */ 40 unsigned int type; /*! inode type */ 41 char name[DIRENT_NAME_MAX_LENGTH]; /*! directory entry name */ 62 int d_ino; /*! 
inode identifier */ 63 int d_type; /*! inode type */ 64 char d_name[48]; /*! dentry name */ 65 char padding[64 - 48 - (2*sizeof(int))]; 42 66 }; 43 67 44 typedef struct user_directory45 {46 struct dirent entry[DIRENT_MAX_NUMBER];47 unsigned int current;48 }49 DIR;50 51 68 #endif -
trunk/kernel/syscalls/shared_include/shared_stat.h
r594 r611 30 30 *****************************************************************************************/ 31 31 32 typedefstruct stat32 struct stat 33 33 { 34 34 unsigned int st_dev; /*! ID of device containing file */ … … 42 42 unsigned int st_blksize; /*! blocksize for file system I/O */ 43 43 unsigned int st_blocks; /*! number of allocated blocks */ 44 } 45 stat_t; 44 }; 46 45 47 46 /****************************************************************************************** … … 52 51 * 53 52 * WARNING : these macros must be kept consistent with inode types in <vfs.h> file. 53 * and with types in <dirent.h> file. 54 54 *****************************************************************************************/ 55 55 … … 60 60 #define S_ISSOCK(x) ((((x)>>16) & 0xF) == 4) /*! it is a socket */ 61 61 #define S_ISCHR(x) ((((x)>>16) & 0xF) == 5) /*! it is a character device */ 62 #define S_ISLNK(x) ((((x)>>16) & 0xF) == 6) /*! it is a symbolic link */ 62 #define S_ISBLK(x) ((((x)>>16) & 0xF) == 6) /*! it is a block device */ 63 #define S_ISLNK(x) ((((x)>>16) & 0xF) == 7) /*! it is a symbolic link */ 63 64 64 65 #endif /* _STAT_H_ */ -
trunk/kernel/syscalls/sys_closedir.c
r473 r611 1 1 /* 2 * sys_closedir.c - Close an open directory.2 * sys_closedir.c - Close an open VFS directory. 3 3 * 4 * Author Alain Greiner (2016, 2017)4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 22 22 */ 23 23 24 #include <kernel_config.h> 24 25 #include <hal_kernel_types.h> 25 26 #include <vfs.h> … … 27 28 #include <thread.h> 28 29 #include <process.h> 30 #include <remote_dir.h> 29 31 #include <errno.h> 30 32 #include <syscalls.h> … … 34 36 int sys_closedir ( DIR * dirp ) 35 37 { 36 printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__, dirp ); 37 return -1; 38 xptr_t dir_xp; // extended pointer on remote_dir_t structure 39 40 thread_t * this = CURRENT_THREAD; // client thread 41 process_t * process = this->process; // client process 42 43 #if (DEBUG_SYS_CLOSEDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 44 uint64_t tm_start = hal_get_cycles(); 45 #endif 46 47 #if DEBUG_SYS_CLOSEDIR 48 if( DEBUG_SYS_CLOSEDIR < tm_start ) 49 printk("\n[%s] thread[%x,%x] enter for DIR <%x> / cycle %d\n", 50 __FUNCTION__, process->pid, this->trdid, dirp, (uint32_t)tm_start ); 51 #endif 52 53 // get extended pointer on kernel remote_dir_t structure from dirp 54 dir_xp = remote_dir_from_ident( (intptr_t)dirp ); 55 56 if( dir_xp == XPTR_NULL ) 57 { 58 59 #if DEBUG_SYSCALLS_ERROR 60 printk("\n[ERROR] in %s / thread[%x,%x] : DIR pointer %x not registered\n", 61 __FUNCTION__ , process->pid , this->trdid, dirp ); 62 #endif 63 this->errno = EINVAL; 64 return -1; 65 } 66 67 // delete kernel remote_dir_t structure 68 remote_dir_destroy( dir_xp ); 69 70 hal_fence(); 71 72 #if (DEBUG_SYS_CLOSEDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 73 uint64_t tm_end = hal_get_cycles(); 74 #endif 75 76 #if DEBUG_SYS_CLOSEDIR 77 if( DEBUG_SYS_CLOSEDIR < tm_end ) 78 printk("\n[%s] thread[%x,%x] exit for DIR <%x> / cycle %d\n", 79 __FUNCTION__, process->pid, this->trdid, dirp, (uint32_t)tm_end ); 80 #endif 81 82 #if CONFIG_INSTRUMENTATION_SYSCALLS 83 
hal_atomic_add( &syscalls_cumul_cost[SYS_CLOSEDIR] , tm_end - tm_start ); 84 hal_atomic_add( &syscalls_occurences[SYS_CLOSEDIR] , 1 ); 85 #endif 86 87 return 0; 88 38 89 } // end sys_closedir() -
trunk/kernel/syscalls/sys_display.c
r594 r611 31 31 #include <string.h> 32 32 #include <shared_syscalls.h> 33 #include <vfs.h> 34 #include <mapper.h> 33 35 34 36 #include <syscalls.h> … … 56 58 int sys_display( reg_t type, 57 59 reg_t arg0, 58 reg_t arg1 ) 60 reg_t arg1, 61 reg_t arg2 ) 59 62 { 60 63 … … 278 281 thread_display_busylocks( thread_xp ); 279 282 } 283 ///////////////////////////////// 284 else if( type == DISPLAY_MAPPER ) 285 { 286 xptr_t root_inode_xp; 287 xptr_t inode_xp; 288 cxy_t inode_cxy; 289 vfs_inode_t * inode_ptr; 290 xptr_t mapper_xp; 291 mapper_t * mapper_ptr; 292 293 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 294 295 char * path = (char *)arg0; 296 uint32_t page_id = (uint32_t)arg1; 297 uint32_t nbytes = (uint32_t)arg2; 298 299 // check pathname length 300 if( hal_strlen_from_uspace( path ) >= CONFIG_VFS_MAX_PATH_LENGTH ) 301 { 302 303 #if DEBUG_SYSCALLS_ERROR 304 printk("\n[ERROR] in %s for MAPPER : pathname too long\n", 305 __FUNCTION__ ); 306 #endif 307 this->errno = ENFILE; 308 return -1; 309 } 310 311 // copy pathname in kernel space 312 hal_strcpy_from_uspace( kbuf , path , CONFIG_VFS_MAX_PATH_LENGTH ); 313 314 // compute root inode for pathname 315 if( kbuf[0] == '/' ) // absolute path 316 { 317 // use extended pointer on VFS root inode 318 root_inode_xp = process->vfs_root_xp; 319 } 320 else // relative path 321 { 322 // get cluster and local pointer on reference process 323 xptr_t ref_xp = process->ref_xp; 324 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 325 cxy_t ref_cxy = GET_CXY( ref_xp ); 326 327 // use extended pointer on CWD inode 328 root_inode_xp = hal_remote_l64( XPTR( ref_cxy , &ref_ptr->cwd_xp ) ); 329 } 330 331 // get extended pointer on target inode 332 error = vfs_lookup( root_inode_xp, 333 kbuf, 334 0, 335 &inode_xp, 336 NULL ); 337 if( error ) 338 { 339 340 #if DEBUG_SYSCALLS_ERROR 341 printk("\n[ERROR] in %s for MAPPER : cannot found inode <%s>\n", 342 __FUNCTION__ , kbuf ); 343 #endif 344 this->errno = ENFILE; 345 return -1; 346 } 347 348 // 
get target inode cluster and local pointer 349 inode_cxy = GET_CXY( inode_xp ); 350 inode_ptr = GET_PTR( inode_xp ); 351 352 // get extended pointer on target mapper 353 mapper_ptr = hal_remote_lpt( XPTR( inode_cxy , &inode_ptr->mapper ) ); 354 mapper_xp = XPTR( inode_cxy , mapper_ptr ); 355 356 // display mapper 357 error = mapper_display_page( mapper_xp , page_id , nbytes , kbuf ); 358 359 if( error ) 360 { 361 362 #if DEBUG_SYSCALLS_ERROR 363 printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n", 364 __FUNCTION__ , page_id ); 365 #endif 366 this->errno = ENFILE; 367 return -1; 368 } 369 } 280 370 //// 281 371 else -
trunk/kernel/syscalls/sys_mmap.c
r594 r611 119 119 // test mmap type : can be FILE / ANON / REMOTE 120 120 121 if( (map_anon == false) && (map_remote == false) ) // FILE 121 /////////////////////////////////////////////////////////// MAP_FILE 122 if( (map_anon == false) && (map_remote == false) ) 122 123 { 123 124 … … 217 218 vseg_cxy = file_cxy; 218 219 } 219 else if ( map_anon ) // MAP_ANON 220 ///////////////////////////////////////////////////////// MAP_ANON 221 else if ( map_anon ) 220 222 { 221 223 mapper_xp = XPTR_NULL; … … 230 232 231 233 } 232 else // MAP_REMOTE 234 /////////////////////////////////////////////////////// MAP_REMOTE 235 else 233 236 { 234 237 mapper_xp = XPTR_NULL; -
trunk/kernel/syscalls/sys_opendir.c
r610 r611 1 1 /* 2 * sys_opendir.c - open adirectory.2 * sys_opendir.c - Open a VFS directory. 3 3 * 4 4 * Author Alain Greiner (2016,2017,2018) … … 22 22 */ 23 23 24 #include <kernel_config.h> 24 25 #include <hal_kernel_types.h> 25 26 #include <hal_uspace.h> 26 27 #include <thread.h> 27 28 #include <process.h> 29 #include <remote_dir.h> 28 30 #include <printk.h> 29 31 #include <errno.h> 32 #include <vseg.h> 30 33 #include <vfs.h> 31 34 #include <syscalls.h> … … 36 39 DIR ** dirp ) 37 40 { 38 error_t error; 39 vseg_t * vseg; // for user space checking 40 xptr_t root_inode_xp; // extended pointer on path root inode 41 41 error_t error; 42 xptr_t root_inode_xp; // extended pointer on path root inode 43 xptr_t inode_xp; // extended pointer on directory inode 44 vfs_inode_t * inode_ptr; // local pointer on directory inode 45 cxy_t inode_cxy; // directory inode cluster 46 uint32_t inode_type; // to check directory inode type 47 xptr_t dir_xp; // extended pointer on remote_dir_t 48 remote_dir_t * dir_ptr; // local pointer on remote_dir_t 49 cxy_t dir_cxy; // remote_dir_t cluster identifier 50 vseg_t * vseg; // for user space checking 51 intptr_t ident; // dirent array pointer in user space 42 52 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; 43 53 44 thread_t * this = CURRENT_THREAD;45 process_t * process = this->process;54 thread_t * this = CURRENT_THREAD; // client thread 55 process_t * process = this->process; // client process 46 56 47 57 #if (DEBUG_SYS_OPENDIR || CONFIG_INSTRUMENTATION_SYSCALLS) … … 85 95 #endif 86 96 87 // compute root inode for path 97 // compute root inode for pathname 88 98 if( kbuf[0] == '/' ) // absolute path 89 99 { … … 102 112 } 103 113 104 /* 105 // call the relevant VFS function ??? 
106 error = vfs_opendir( root_inode_xp, 107 kbuf ); 114 // get extended pointer on directory inode 115 error = vfs_lookup( root_inode_xp, 116 kbuf, 117 0, 118 &inode_xp, 119 NULL ); 108 120 if( error ) 109 121 { 110 122 111 123 #if DEBUG_SYSCALLS_ERROR 112 printk("\n[ERROR] in %s / thread[%x,%x] : cannot opendirectory <%s>\n",113 __FUNCTION__ , process->pid , this->trdid , pathname);124 printk("\n[ERROR] in %s / thread[%x,%x] : cannot found directory <%s>\n", 125 __FUNCTION__ , process->pid , this->trdid , kbuf ); 114 126 #endif 115 127 this->errno = ENFILE; … … 117 129 } 118 130 119 // copy to user space ??? 120 */ 131 // check inode type 132 inode_ptr = GET_PTR( inode_xp ); 133 inode_cxy = GET_CXY( inode_xp ); 134 inode_type = hal_remote_l32( XPTR( inode_cxy , &inode_ptr->type ) ); 135 136 if( inode_type != INODE_TYPE_DIR ) 137 { 138 139 #if DEBUG_SYSCALLS_ERROR 140 printk("\n[ERROR] in %s / thread[%x,%x] : cannot found directory <%s>\n", 141 __FUNCTION__ , process->pid , this->trdid , kbuf ); 142 #endif 143 this->errno = ENFILE; 144 return -1; 145 } 146 147 // allocate, initialize, and register a new remote_dir_t structure 148 // in the calling process reference cluster 149 dir_xp = remote_dir_create( inode_xp ); 150 dir_ptr = GET_PTR( dir_xp ); 151 dir_cxy = GET_CXY( dir_xp ); 152 153 if( dir_xp == XPTR_NULL ) 154 { 155 156 #if DEBUG_SYSCALLS_ERROR 157 printk("\n[ERROR] in %s / thread[%x,%x] : cannot create remote_dir for <%s>\n", 158 __FUNCTION__ , process->pid , this->trdid , kbuf ); 159 #endif 160 this->errno = ENFILE; 161 return -1; 162 } 163 164 // get ident from remote_dir structure 165 ident = (intptr_t)hal_remote_lpt( XPTR( dir_cxy , &dir_ptr->ident ) ); 166 167 // set ident value in user buffer 168 hal_copy_to_uspace( dirp , &ident , sizeof(intptr_t) ); 121 169 122 170 hal_fence(); -
trunk/kernel/syscalls/sys_readdir.c
r473 r611 1 1 /* 2 * sys_readdir.c - Read one entry from an open directory.2 * sys_readdir.c - Copy one entry from an open VFS directory to an user buffer. 3 3 * 4 * Author Alain Greiner (2016,2017 )4 * Author Alain Greiner (2016,2017,2018) 5 5 * 6 6 * Copyright (c) UPMC Sorbonne Universites … … 30 30 #include <vfs.h> 31 31 #include <process.h> 32 #include <remote_dir.h> 32 33 #include <syscalls.h> 33 34 #include <shared_syscalls.h> … … 35 36 /////////////////////////////////////// 36 37 int sys_readdir( DIR * dirp, 37 struct dirent ** dentp)38 struct dirent ** buffer ) 38 39 { 39 printk("\n[ERROR] in %s : not implemented yet\n", __FUNCTION__, dirp, dentp ); 40 return -1; 40 error_t error; 41 vseg_t * vseg; // for user space checking of buffer 42 xptr_t dir_xp; // extended pointer on remote_dir_t structure 43 remote_dir_t * dir_ptr; // local pointer on remote_dir_t structure 44 cxy_t dir_cxy; // remote_dir_t stucture cluster identifier 45 struct dirent * direntp; // dirent pointer in user space 46 uint32_t entries; // total number of dirent entries 47 uint32_t current; // current dirent index 48 49 thread_t * this = CURRENT_THREAD; // client thread 50 process_t * process = this->process; // client process 51 52 #if (DEBUG_SYS_READDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 53 uint64_t tm_start = hal_get_cycles(); 54 #endif 55 56 #if DEBUG_SYS_READDIR 57 if( DEBUG_SYS_READDIR < tm_start ) 58 printk("\n[%s] thread[%x,%x] enter / dirp %x / cycle %d\n", 59 __FUNCTION__, process->pid, this->trdid, dirp, (uint32_t)tm_start ); 60 #endif 61 62 // check buffer in user space 63 error = vmm_get_vseg( process , (intptr_t)buffer, &vseg ); 64 65 if( error ) 66 { 67 68 #if DEBUG_SYSCALLS_ERROR 69 printk("\n[ERROR] in %s / thread[%x,%x] : user buffer %x unmapped\n", 70 __FUNCTION__ , process->pid , this->trdid, buffer ); 71 vmm_display( process , false ); 72 #endif 73 this->errno = EINVAL; 74 return -1; 75 } 76 77 // get pointers on remote_dir_t structure from dirp 78 dir_xp = 
remote_dir_from_ident( (intptr_t)dirp ); 79 dir_ptr = GET_PTR( dir_xp ); 80 dir_cxy = GET_CXY( dir_xp ); 81 82 if( dir_xp == XPTR_NULL ) 83 { 84 85 #if DEBUG_SYSCALLS_ERROR 86 printk("\n[ERROR] in %s / thread[%x,%x] : dirp %x not registered\n", 87 __FUNCTION__ , process->pid , this->trdid, dirp ); 88 #endif 89 this->errno = EBADF; 90 return -1; 91 } 92 93 // get "current" and "entries_nr" values from remote_dir_t structure 94 current = hal_remote_l32( XPTR( dir_cxy , &dir_ptr->current ) ); 95 entries = hal_remote_l32( XPTR( dir_cxy , &dir_ptr->entries ) ); 96 97 // check "current" index 98 if( current >= entries ) 99 { 100 this->errno = 0; 101 return -1; 102 } 103 104 // compute dirent pointer in user space 105 direntp = (struct dirent *)dirp + current; 106 107 #if (DEBUG_SYS_READDIR & 1) 108 if( DEBUG_SYS_READDIR < tm_start ) 109 printk("\n[%s] entries = %d / current = %d / direntp = %x\n", 110 __FUNCTION__, entries, current, direntp ); 111 #endif 112 113 // copy dirent pointer to user buffer 114 hal_copy_to_uspace( buffer, &direntp , sizeof(void *) ); 115 116 // update current index in "remote_dir_t" structure 117 hal_remote_atomic_add( XPTR( dir_cxy , &dir_ptr->current ) , 1 ); 118 119 hal_fence(); 120 121 #if (DEBUG_SYS_READDIR || CONFIG_INSTRUMENTATION_SYSCALLS) 122 uint64_t tm_end = hal_get_cycles(); 123 #endif 124 125 #if DEBUG_SYS_READDIR 126 if( DEBUG_SYS_READDIR < tm_end ) 127 printk("\n[%s] thread[%x,%x] exit / cycle %d\n", 128 __FUNCTION__, process->pid, this->trdid, (uint32_t)tm_end ); 129 #endif 130 131 #if CONFIG_INSTRUMENTATION_SYSCALLS 132 hal_atomic_add( &syscalls_cumul_cost[SYS_READDIR] , tm_end - tm_start ); 133 hal_atomic_add( &syscalls_occurences[SYS_READDIR] , 1 ); 134 #endif 135 136 return 0; 137 41 138 } // end sys_readdir() -
trunk/kernel/syscalls/syscalls.h
r610 r611 328 328 /****************************************************************************************** 329 329 * [23] This function open a directory, that must exist in the file system, returning 330 * a DIR pointer on the dire ctory in user space.331 ****************************************************************************************** 332 * @ pathname : pathname (can be relative or absolute).330 * a DIR pointer on the dirent array in user space. 331 ****************************************************************************************** 332 * @ pathname : [in] pathname (can be relative or absolute). 333 333 * @ dirp : [out] buffer for pointer on user directory (DIR). 334 334 * @ return 0 if success / returns -1 if failure. … … 341 341 * next directory entry in the directory identified by the <dirp> argument. 342 342 ****************************************************************************************** 343 * @ dirp : user pointer identifying the searcheddirectory.344 * @ dentp : [out] buffer for pointer on user direntory entry (dirent).343 * @ dirp : [in] user pointer on dirent array identifying the open directory. 344 * @ buffer : [out] pointer on user buffer for a pointer on dirent in user space. 345 345 * @ return O if success / returns -1 if failure. 346 346 *****************************************************************************************/ 347 347 int sys_readdir( DIR * dirp, 348 struct dirent ** dentp);348 struct dirent ** buffer ); 349 349 350 350 /****************************************************************************************** … … 352 352 * all structures associated with the <dirp> pointer. 353 353 ****************************************************************************************** 354 * @ dirp : user pointer identifying thedirectory.354 * @ dirp : [in] user pointer on dirent array identifying the open directory. 355 355 * @ return 0 if success / returns -1 if failure. 
356 356 *****************************************************************************************/ … … 575 575 * [43] This debug function displays on the kernel terminal TXT0 an user defined string, 576 576 * or the current state of a kernel structure, identified by the <type> argument. 577 * The <arg0> and <arg1> arguments depends on the structure type:577 * The <arg0>, <arg1>, and <arg2> arguments depends on the structure type: 578 578 * - DISPLAY_STRING : an user defined string 579 579 * - DISPLAY_VMM : VSL and GPT for a process identified by <pid>. … … 583 583 * - DISPLAY_VFS : all files registered in the VFS cache. 584 584 * - DISPLAY_CHDEV : all registered channel devices. 585 * - DISPLAY_DQDT : all DQDT nodes. 585 * - DISPLAY_DQDT : all DQDT nodes curren values. 586 * - DISPLAY_BUSYLOCKS : all busylocks taken by one thread. 587 * - DISPLAY_MAPPER : one page of a given mapper. 586 588 ****************************************************************************************** 587 589 * type : [in] type of display 588 590 * arg0 : [in] type dependant argument. 589 591 * arg1 : [in] type dependant argument. 592 * arg2 : [in] type dependant argument. 590 593 * @ return 0 if success / return -1 if illegal arguments 591 594 *****************************************************************************************/ 592 595 int sys_display( reg_t type, 593 596 reg_t arg0, 594 reg_t arg1 ); 597 reg_t arg1, 598 reg_t arg2 ); 595 599 596 600 /******************************************************************************************
Note: See TracChangeset
for help on using the changeset viewer.