Changeset 656 for trunk/kernel
Timestamp: Dec 6, 2019, 12:07:51 PM
Location:  trunk/kernel
Files:     22 edited
trunk/kernel/fs/fatfs.c
Changes from r647 to r656:

Added: a new static function fatfs_get_cluster(), inserted after fatfs_recursive_release():

//////////////////////////////////////////////////////////////////////////////////////////
// This static function accesses the FAT (File Allocation Table), stored in the FAT
// mapper, and returns in <searched_cluster_id> the FATFS cluster_id for a given page
// of a given inode, identified by the <searched_page_id> argument, that is the page
// index in the file (i.e. the page index in the file mapper). The entry point in the
// FAT is defined by the <first_cluster_id> argument, that is the cluster_id of an
// already allocated cluster. It can be the cluster_id of the first page of the file
// (always registered in the fatfs_inode extension), or of any other allocated page of
// the file, whose <first_page_id> index is smaller than the <searched_page_id> argument.
// This function can be called by a thread running in any cluster, as it uses remote
// access primitives when the FAT mapper is remote.
// The FAT mapper being a WRITE-THROUGH cache, this function updates the FAT mapper
// from information stored on the IOC device in case of miss when scanning the FAT mapper.
// The searched inode mapper being a WRITE-BACK cache, this function allocates a new
// cluster_id when the searched page exists in the inode mapper and there is no FATFS
// cluster allocated yet for this page. It updates the FAT, but it does NOT copy the
// mapper page content to the File System.
//////////////////////////////////////////////////////////////////////////////////////////
// @ first_page_id        : [in]  index in file mapper for an existing page.
// @ first_cluster_id     : [in]  cluster_id for this existing page.
// @ searched_page_id     : [in]  index in file mapper for the searched page.
// @ searched_cluster_id  : [out] cluster_id for the searched page.
// @ return 0 if success / return -1 if a FAT mapper miss cannot be solved,
//   or if a missing cluster_id cannot be allocated.
//////////////////////////////////////////////////////////////////////////////////////////
static error_t fatfs_get_cluster( uint32_t   first_page_id,
                                  uint32_t   first_cluster_id,
                                  uint32_t   searched_page_id,
                                  uint32_t * searched_cluster_id )
{
    uint32_t  current_page_id;      // index of page in file mapper
    uint32_t  current_cluster_id;   // index of cluster in FATFS
    xptr_t    lock_xp;              // extended pointer on FAT lock

    assert( (searched_page_id > first_page_id) ,
    "searched_page_id must be larger than first_page_id\n");

#if DEBUG_FATFS_GET_CLUSTER
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_FATFS_GET_CLUSTER < cycle )
printk("\n[%s] thread[%x,%x] enter / first_cluster_id %x / searched_page_id %d / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, first_cluster_id, searched_page_id, cycle );
#endif

    // get local pointer on VFS context (same in all clusters)
    vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS];

    // get local pointer on local FATFS context
    fatfs_ctx_t * loc_fatfs_ctx = vfs_ctx->extend;

    // get extended pointer and cluster on FAT mapper
    xptr_t fat_mapper_xp = loc_fatfs_ctx->fat_mapper_xp;
    cxy_t  fat_cxy       = GET_CXY( fat_mapper_xp );

    // get local pointer on FATFS context in FAT cluster
    fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) );

    // build extended pointer on FAT lock in FAT cluster
    lock_xp = XPTR( fat_cxy , &fat_fatfs_ctx->lock );

    // take FAT lock in read mode
    remote_rwlock_rd_acquire( lock_xp );

    // initialize loop variables
    current_page_id    = first_page_id;
    current_cluster_id = first_cluster_id;

    // scan FAT mapper (i.e. traverse the FAT linked list)
    // starting from first_page_id until searched_page_id ;
    // each iteration in this loop can change both
    // the FAT page index and the slot index in the FAT
    while( current_page_id < searched_page_id )
    {
        // FAT mapper page and slot indexes (1024 slots per FAT page)
        uint32_t fat_page_index = current_cluster_id >> 10;
        uint32_t fat_slot_index = current_cluster_id & 0x3FF;

        // get pointer on current page descriptor in FAT mapper
        xptr_t current_page_xp = mapper_remote_get_page( fat_mapper_xp , fat_page_index );

        if( current_page_xp == XPTR_NULL )
        {
            printk("\n[ERROR] in %s : cannot get page %d from FAT mapper\n",
            __FUNCTION__ , fat_page_index );
            remote_rwlock_rd_release( lock_xp );
            return -1;
        }

        // get pointer on buffer containing the FAT mapper page
        xptr_t     base_xp = ppm_page2base( current_page_xp );
        uint32_t * buffer  = (uint32_t *)GET_PTR( base_xp );

        // get next_cluster_id from FAT slot
        uint32_t next_cluster_id = hal_remote_l32( XPTR( fat_cxy, &buffer[fat_slot_index] ) );

        // allocate a new FAT cluster when there is no cluster
        // allocated on device for the current page
        if( next_cluster_id >= END_OF_CHAIN_CLUSTER_MIN )
        {
            // release the FAT lock in read mode
            remote_rwlock_rd_release( lock_xp );

            // allocate a new cluster_id (and update both FAT mapper and FAT on device)
            error_t error = fatfs_cluster_alloc( &next_cluster_id );

            if( error )
            {
                printk("\n[ERROR] in %s : cannot allocate cluster on FAT32 for page %d\n",
                __FUNCTION__ , current_page_id );
                return -1;    // the FAT lock was already released above
            }

#if (DEBUG_FATFS_GET_CLUSTER & 1)
if( DEBUG_FATFS_GET_CLUSTER < cycle )
printk("\n[%s] allocated a new cluster_id %d in FATFS\n",
__FUNCTION__, next_cluster_id );
#endif
            // take the FAT lock again in read mode
            remote_rwlock_rd_acquire( lock_xp );
        }

#if (DEBUG_FATFS_GET_CLUSTER & 1)
if( DEBUG_FATFS_GET_CLUSTER < cycle )
printk("\n[%s] traverse FAT / fat_page_index %d / fat_slot_index %d / next_cluster_id %x\n",
__FUNCTION__, fat_page_index, fat_slot_index , next_cluster_id );
#endif

        // update loop variables
        current_cluster_id = next_cluster_id;
        current_page_id++;
    }

    // release FAT lock
    remote_rwlock_rd_release( lock_xp );

#if DEBUG_FATFS_GET_CLUSTER
if( DEBUG_FATFS_GET_CLUSTER < cycle )
printk("\n[%s] thread[%x,%x] exit / searched_cluster_id = %d\n",
__FUNCTION__, this->process->pid, this->trdid, current_cluster_id );
#endif

    *searched_cluster_id = current_cluster_id;
    return 0;

}  // end fatfs_get_cluster()

Note: the spurious remote_rwlock_wr_release() in the allocation error path is dropped,
as the lock was already released in read mode before calling fatfs_cluster_alloc().

Changed: fatfs_display_fat() takes the two arguments <min_slot> and <nb_slots> instead
of <nentries>, to display only the requested slice of the FAT page:

//////////////////////////////////////////
void fatfs_display_fat( uint32_t page_id,
                        uint32_t min_slot,
                        uint32_t nb_slots )
{
    uint32_t line;

    // compute first and last lines to display (8 slots per line)
    uint32_t min_line = min_slot >> 3;
    uint32_t max_line = (min_slot + nb_slots - 1) >> 3;

    // get pointer on local FATFS context
    vfs_ctx_t   * vfs_ctx       = &fs_context[FS_TYPE_FATFS];
    fatfs_ctx_t * loc_fatfs_ctx = (fatfs_ctx_t *)vfs_ctx->extend;

    // get pointers on FAT mapper (in FAT cluster)
    xptr_t mapper_xp  = loc_fatfs_ctx->fat_mapper_xp;
    cxy_t  mapper_cxy = GET_CXY( mapper_xp );

    // get pointer on FATFS context in FAT cluster
    fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( mapper_cxy , &vfs_ctx->extend ) );

    // get current values of hint and free_clusters
    uint32_t hint = hal_remote_l32( XPTR( mapper_cxy , &fat_fatfs_ctx->free_cluster_hint ) );
    uint32_t free = hal_remote_l32( XPTR( mapper_cxy , &fat_fatfs_ctx->free_clusters ) );

    // get extended pointer on requested page descriptor in FAT mapper
    xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );

    // get pointers on requested page base
    xptr_t base_xp = ppm_page2base( page_xp );
    void * base    = GET_PTR( base_xp );

    printk("\n***** FAT mapper / cxy %x / page_id %d / base %x / free_clusters %x / hint %x\n",
    mapper_cxy, page_id, base, free, hint );

    for( line = min_line ; line <= max_line ; line++ )
    {
        printk("%d : %X | %X | %X | %X | %X | %X | %X | %X\n", (line<<3),
        hal_remote_l32( base_xp + ((line<<5)     ) ),
        hal_remote_l32( base_xp + ((line<<5) + 4 ) ),
        ...                                            // slots +8 to +28 unchanged
    }

}  // end fatfs_display_fat()

Removed: the previous public fatfs_get_cluster( first_cluster_id, searched_page_index,
searched_cluster_id ) implementation. It always entered the FAT at the first cluster
of the file, derived the next FAT page and slot indexes from next_cluster_id, and
returned -1 when it reached an end-of-chain marker; it could not allocate a missing
cluster. It is replaced by the static function above.

Fixed in the fatfs_scan_directory() header comment: "... and return a pointer on the
directory entry, described as an array of 32 bytes, and the index of this entry in
the FAT32 mapper, seen as an array of 32-byte entries." (corrects the "as and array"
and "in cex" typos).
Changed: fatfs_scan_directory() becomes static:

static error_t fatfs_scan_directory( mapper_t * mapper,
                                     char     * name,
                                     uint8_t ** entry,
                                     uint32_t * index )

In its body, the comment on the <cname> buffer now reads "name extracted from
directory entry", and the debug call displaying the scanned page passes the page
descriptor instead of the page index:

#if (DEBUG_FATFS_SCAN_DIRECTORY & 0x1)
if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
mapper_display_page( mapper_xp , page_xp , 256 );
#endif

Changed in fatfs_new_dentry(): the local <dir_name> buffer is renamed <parent_name>,
the DEBUG_FATFS_GET_DENTRY switch is renamed DEBUG_FATFS_NEW_DENTRY, and an explicit
error message is printed when the <name> entry is not found in the parent directory
mapper:

    // return non fatal error if not found
    if( error )
    {
        vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
        printk("\n[ERROR] in %s : cannot find <%s> entry in <%s> directory mapper\n",
        __FUNCTION__, name , parent_name );
        return -1;
    }

The exit debug message now also reports the cluster_id and size, and two new debug
calls display the FAT state:

#if (DEBUG_FATFS_NEW_DENTRY & 1)
if( DEBUG_FATFS_NEW_DENTRY < cycle )
{
fatfs_display_fat( 0 , 0 , 64 );
fatfs_display_fat( cluster >> 10 , (cluster & 0x3FF) , 32 );
}
#endif

Changed in fatfs_update_dentry(): the assert( (size != 0) , "size is 0\n" ) is
removed, and the directory entry is now updated in the mapper and on the device
only when the size has actually changed:

    // get current size value
    uint32_t current_size = fatfs_get_record( DIR_FILE_SIZE , entry );

    // update dentry in mapper & device only if required
    if( size != current_size )
    {
        // set size field in FAT32 directory entry
        fatfs_set_record( DIR_FILE_SIZE , entry , size );

        // get pointer on modified page base
        void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));

        // get extended pointer on modified page descriptor
        xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) );

        // synchronously update the modified page on device
        error = fatfs_move_page( page_xp , IOC_SYNC_WRITE );

        if( error )
        {
            vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
            printk("\n[ERROR] in %s : cannot update parent directory <%s> on device\n",
            __FUNCTION__, dir_name );
            return -1;
        }
    }

Fixed: the closing comment of fatfs_cluster_alloc() now reads
"} // end fatfs_cluster_alloc()" (was "fat_cluster_alloc").

Changed in fatfs_move_page(): the comment typo "get page cluster an local pointer"
becomes "get page cluster and local pointer"; the FAT-mapper and inode-mapper cases
are introduced by full-width separator comments; the common enter-debug message is
replaced by a specific enter/exit pair inside each branch (the exit message no longer
distinguishes read from sync commands); and the variables <first_cluster> /
<searched_cluster> are renamed <first_cluster_id> / <searched_cluster_id>. For an
inode mapper page other than page 0, the FAT is now scanned with the new
four-argument fatfs_get_cluster(), always entering the FAT at the first page:

    // compute searched_cluster_id
    if( page_id == 0 )            // no need to access FAT mapper
    {
        // searched cluster is first cluster
        searched_cluster_id = first_cluster_id;
    }
    else                          // FAT mapper access required
    {
        // scan FAT mapper to get searched_cluster_id
        error = fatfs_get_cluster( 0,                  // first page in mapper
                                   first_cluster_id,
                                   page_id,
                                   &searched_cluster_id );
        if( error )
        {
            printk("\n[ERROR] in %s : cannot get cluster_id\n", __FUNCTION__ );
            return -1;
        }
    }

    // get lba for searched_cluster_id
    uint32_t lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster_id );

    // access IOC device to move 8 blocks
    error = dev_ioc_move_data( cmd_type , buffer_xp , lba , 8 );
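For readers unfamiliar with the FAT32 chaining scheme used above, here is a minimal,
self-contained sketch of the same traversal over an in-memory FAT array (no mappers,
no locks, no remote accesses); END_OF_CHAIN and the fat[] array are illustrative
stand-ins for the kernel's END_OF_CHAIN_CLUSTER_MIN marker and the FAT mapper:

#include <stdint.h>
#include <stdio.h>

#define END_OF_CHAIN  0x0FFFFFF8   // simplified end-of-chain marker (assumption)

// Walk a FAT chain: starting from the cluster_id of page <first_page_id>,
// follow (searched_page_id - first_page_id) links to reach the searched page.
static int get_cluster( const uint32_t * fat,
                        uint32_t         first_page_id,
                        uint32_t         first_cluster_id,
                        uint32_t         searched_page_id,
                        uint32_t       * searched_cluster_id )
{
    uint32_t cluster_id = first_cluster_id;
    for( uint32_t page = first_page_id ; page < searched_page_id ; page++ )
    {
        uint32_t next = fat[cluster_id];        // one link per file page
        if( next >= END_OF_CHAIN ) return -1;   // missing cluster (the kernel allocates here)
        cluster_id = next;
    }
    *searched_cluster_id = cluster_id;
    return 0;
}

int main( void )
{
    // tiny FAT: a file occupies clusters 3 -> 7 -> 9
    uint32_t fat[16] = { 0 };
    fat[3] = 7;  fat[7] = 9;  fat[9] = END_OF_CHAIN;

    uint32_t c;
    if( get_cluster( fat , 0 , 3 , 2 , &c ) == 0 )
        printf("page 2 is in cluster %u\n", c);  // prints 9
    return 0;
}

The kernel version adds three things on top of this loop: remote accesses to reach
a FAT mapper in another cluster, a read lock around the scan, and on-demand cluster
allocation when the chain ends before the searched page.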
trunk/kernel/fs/fatfs.h
Changes from r638 to r656:

Changed: the header comment box is widened, and the description of the "extend"
fields is reworked and extended:

 * 2) The vfs_inode_t "extend" contains, for each inode,
 *    the first FAT32 cluster_id (after cast to intptr).
 * 3) The vfs_dentry_t "extend" field contains, for each dentry, the entry index
 *    in the FATFS directory (32 bytes per FATFS directory entry).
 *
 * In the FAT32 File System, the File Allocation Table is actually an array
 * of uint32_t slots. Each slot in this array contains the index (called cluster_id)
 * of another slot in this array, to form one linked list for each file stored on
 * device in the FAT32 File System. This index in the FAT array is also the index of
 * the FATFS cluster on the device. One FATFS cluster is supposed to contain one PPM
 * page. For a given file, the entry point in the FAT is the cluster_id of the FATFS
 * cluster containing the first page of the file, but it can be any cluster_id
 * already allocated to the file.

Removed: the public fatfs_get_cluster() prototype and its comment block (the function
is now static in fatfs.c, see above); its description of the FAT structure moves into
the header comment quoted above.

Changed: the two display functions are now documented as debug functions, and
fatfs_display_fat() is re-documented with its new arguments:

/*****************************************************************************************
 * This debug function displays the content of the FATFS context copy in the cluster
 * identified by the <cxy> argument.
 * This function can be called by a thread running in any cluster.
 ****************************************************************************************/

/*****************************************************************************************
 * This debug function accesses the FAT mapper to display the current FAT state,
 * as defined by the <page_id>, <min_slot>, and <nb_slots> arguments.
 * It loads the missing pages from IOC to mapper if required.
 * This function can be called by a thread running in any cluster.
 *****************************************************************************************
 * @ page_id  : page index in FAT mapper (one page is 4 Kbytes = 1024 slots).
 * @ min_slot : first slot in page.
 * @ nb_slots : number of slots (one slot is 4 bytes).
 ****************************************************************************************/
void fatfs_display_fat( uint32_t page_id,
                        uint32_t min_slot,
                        uint32_t nb_slots );

Changed in the fatfs_new_dentry() comment: "In case of success, it completes the
initialization of the inode/dentry couple, identified by the <child_inode_xp>
argument. The child inode descriptor, and the associated dentry descriptor, must
have been previously allocated by the caller."

Changed in the fatfs_sync_fat() comment, the TODO note now reads: "A solution is to
maintain in the FAT context two "dirty_min" and "dirty_max" variables defining the
smallest/largest dirty page index in the FAT mapper..."

Changed in the fatfs_cluster_alloc() comment: the function "takes the rwlock stored
in the FATFS context located in the same cluster as the FAT mapper itself" (it was
wrongly described as a queuelock), and the output argument is renamed:

 * @ searched_cluster_id : [out] allocated FATFS cluster index.
error_t fatfs_cluster_alloc( uint32_t * searched_cluster_id );

Changed in the fatfs_release_inode() comment: "This function is used to remove a
given file or directory from the FATFS file system. [...] it calls the
fatfs_sync_fat() function to synchronously update all modified pages in the FAT
mapper to the IOC device."

Changed in the fatfs_move_page() comment:
 * - For the FAT mapper, it read/write the FAT region on IOC device.
 * - For a regular file, it scan the FAT mapper to get the cluster_id on IOC device,
 *   and read/write this cluster.
and a new TODO is added:
 * TODO : In this first implementation, the entry point in the FAT to get the
 *        cluster_id is always the cluster_id of the first page, registered in the
 *        inode extension. This can introduce a quadratic cost when accessing all
 *        pages of a big file. An optimisation would be to introduce in the inode
 *        extension two new fields <other_page_id> & <other_cluster_id>, defining
 *        a second entry point in the FAT.
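The quadratic-cost remark in this TODO can be made concrete: sequentially touching
pages 0..N-1 while always re-walking the chain from the first cluster costs O(N^2)
link reads, whereas remembering the last (page_id, cluster_id) pair reached brings
it back to O(N). The sketch below illustrates the proposed second entry point; the
fat_hint_t structure and field names are hypothetical, reusing only the
other_page_id / other_cluster_id names suggested by the TODO:

#include <stdint.h>

#define END_OF_CHAIN 0x0FFFFFF8            // simplified marker (assumption)

typedef struct
{
    uint32_t other_page_id;                // hypothetical second FAT entry point:
    uint32_t other_cluster_id;             // last (page, cluster) pair reached
} fat_hint_t;

// Resolve <page_id> using the hint when it provides a closer entry point,
// then update the hint so the next sequential access starts from here.
static int get_cluster_hinted( const uint32_t * fat,
                               fat_hint_t     * hint,
                               uint32_t         first_cluster_id,
                               uint32_t         page_id,
                               uint32_t       * cluster_id )
{
    uint32_t page, cur;

    if( hint->other_page_id <= page_id )   // hint usable as entry point
    {
        page = hint->other_page_id;
        cur  = hint->other_cluster_id;
    }
    else                                   // fall back to the first cluster
    {
        page = 0;
        cur  = first_cluster_id;
    }

    for( ; page < page_id ; page++ )
    {
        if( fat[cur] >= END_OF_CHAIN ) return -1;
        cur = fat[cur];
    }

    hint->other_page_id    = page_id;      // remember entry point for next call
    hint->other_cluster_id = cur;
    *cluster_id = cur;
    return 0;
}

For a strictly sequential read, each call then follows exactly one FAT link, which
is the linear behaviour the TODO is after.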
trunk/kernel/fs/vfs.c
Changes from r651 to r656:

Changed in vfs_inode_create(): the debug code no longer calls vfs_inode_get_name()
(the name is not yet available at creation time), and prints the (cxy,ptr) couple:

#if DEBUG_VFS_INODE_CREATE
uint32_t   cycle = (uint32_t)hal_get_cycles();
thread_t * this  = CURRENT_THREAD;
if( DEBUG_VFS_INODE_CREATE < cycle )
printk("\n[%s] thread[%x,%x] created inode (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, cycle );
#endif

Changed in vfs_dentry_create(): the debug message becomes
"created dentry <%s> : (%x,%x)".

Changed: vfs_user_move() is rewritten. It now returns a uint32_t, the <size>
argument is renamed <count>, and the number of bytes actually moved is computed
from the current file size for a read, while a write updates the file size in the
inode when the file grows:

///////////////////////////////////////////
uint32_t vfs_user_move( bool_t   to_buffer,
                        xptr_t   file_xp,
                        void   * buffer,
                        uint32_t count )
{
    cxy_t             file_cxy;   // remote file descriptor cluster
    vfs_file_t      * file_ptr;   // remote file descriptor local pointer
    mapper_t        * mapper;     // local pointer on file mapper
    vfs_inode_t     * inode;      // local pointer on file inode
    vfs_inode_type_t  type;       // inode type
    uint32_t          offset;     // current offset in file
    uint32_t          size;       // current file size
    uint32_t          nbytes;     // number of bytes actually transfered
    error_t           error;

    ...

    // get various infos from remote file descriptor
    type   = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
    offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
    mapper = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
    inode  = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
    size   = hal_remote_l32( XPTR( file_cxy , &inode->size ) );

    // check inode type
    assert( (type == INODE_TYPE_FILE), "bad inode type" );

    ...   // enter debug messages now print <count> and <offset>

    if( to_buffer )  // => compute the number of bytes to move and make the move
    {
        // compute number of bytes to move
        if     ( size <= offset )        nbytes = 0;
        else if( size < offset + count ) nbytes = size - offset;
        else                             nbytes = count;

        // move data from mapper to buffer when required
        if( nbytes > 0 )
        {
            error = mapper_move_user( XPTR( file_cxy , mapper ),
                                      to_buffer,
                                      offset,
                                      buffer,
                                      nbytes );
        }
        else
        {
            error = 0;
        }
    }
    else             // to mapper => make the move and update the file size if required
    {
        nbytes = count;

        // move data from buffer to mapper
        error = mapper_move_user( XPTR( file_cxy , mapper ),
                                  to_buffer,
                                  offset,
                                  buffer,
                                  count );

        // update file size in inode if required
        if( offset + count > size )
        {
            vfs_inode_update_size( XPTR( file_cxy , inode ) , offset + count );
        }
    }

    if( error )
    {
        ...
    }

    // update file offset in file descriptor
    hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->offset ) , nbytes );

    ...  // exit debug messages now report the number of bytes actually moved

    return nbytes;

}  // end vfs_user_move()

Changed in vfs_lookup(): an optional debug call displays the whole Inode Tree:

#if ( DEBUG_VFS_LOOKUP & 1 )
if( DEBUG_VFS_LOOKUP < cycle )
vfs_display( root_xp );
#endif

and two messages are sharpened ("search the child dentry matching name in parent
inode XHTAB" / "created missing inode <%s> in cluster %x").

Changed in vfs_new_dentry_init(): the <cluster> variable is renamed <cluster_id>,
the error and debug messages mention "cluster_id", and the initial size of a child
inode is now 0 for both files and directories (it was 4096 for a directory):

    // 2. update the child inode descriptor size and extend
    child_type = hal_remote_l32( XPTR( child_cxy , &child_ptr->type ) );
    child_size = 0;

    hal_remote_s32( XPTR( child_cxy , &child_ptr->size )   , child_size );
    hal_remote_spt( XPTR( child_cxy , &child_ptr->extend ) , (void*)(intptr_t)cluster_id );

Changed in vfs_add_child_in_parent(): the debug messages become "created dentry
<%s> : (%x,%x)", "created inode <%s> : (%x,%x)", "linked dentry(%x,%x) to child
inode(%x,%x)", and "linked dentry(%x,%x) to parent inode(%x,%x)".

Fixed: the closing comment of vfs_fs_cluster_alloc() now reads
"} // end vfs_fs_cluster_alloc()" (was "vfs_fs_ alloc_cluster").
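The read-size computation in vfs_user_move() is easy to get wrong at the
boundaries; here is a minimal standalone check of the same three-way clamp
(function and variable names are illustrative, not kernel identifiers):

#include <assert.h>
#include <stdint.h>

// Same clamp as the vfs_user_move() read path: bytes actually readable
// from <offset>, given the file <size> and the requested <count>.
static uint32_t read_nbytes( uint32_t size, uint32_t offset, uint32_t count )
{
    if( size <= offset )        return 0;              // offset at or past EOF
    if( size < offset + count ) return size - offset;  // partial read up to EOF
    return count;                                      // full request fits
}

int main( void )
{
    assert( read_nbytes( 100, 100, 10 ) == 0  );   // at EOF : nothing to read
    assert( read_nbytes( 100,  95, 10 ) == 5  );   // truncated by EOF
    assert( read_nbytes( 100,  10, 10 ) == 10 );   // entirely inside the file
    return 0;
}

Note that the offset is advanced by nbytes rather than by the requested count, so
a short read leaves the offset at end-of-file, matching POSIX read() semantics.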
trunk/kernel/fs/vfs.h
Changes from r635 to r656:

Added to the vfs_fs_new_dentry_init() comment (step 2):
 * The size is 4096 for a directory, the size is 0 for a file.

Changed: the vfs_user_move() prototype and comment now match the new implementation:
 * - for a read, it checks the actual file size (registered in the inode descriptor)
 *   against (offset + count), and moves only the significant bytes.
 * - for a write, it updates the file size in the inode descriptor if required.
 *   In case of write to the mapper, the "inode.size" field is updated as required.
 ******************************************************************************************
 * @ to_buffer : mapper -> buffer if true / buffer -> mapper if false.
 * @ file_xp   : extended pointer on the remote file descriptor.
 * @ buffer    : user space pointer on buffer (can be physically distributed).
 * @ count     : requested number of bytes from offset.
 * @ returns number of bytes actually moved if success / -1 if error.
 *****************************************************************************************/
uint32_t vfs_user_move( bool_t   to_buffer,
                        xptr_t   file_xp,
                        void   * buffer,
                        uint32_t count );

Changed in the vfs_close() comment (step 2): "It updates the file size in all parent
directory mapper(s), and updates the modified pages on the block device, using RPCs
if required, only if the size is modified."

Changed in the vfs_fs_move_page() comment: "It is used in case of MISS on the mapper
(read), or when a dirty page in the mapper must be updated in the File System
(write). The mapper pointer, and the page index in file, are obtained from the page
descriptor."
trunk/kernel/kernel_config.h
Changes from r651 to r656:

Added two debug switches:

#define DEBUG_FATFS_SCAN_DIRECTORY    0
#define DEBUG_GRDXT_INSERT            0

The DEBUG_MAPPER_MOVE_KERNEL / DEBUG_MAPPER_MOVE_USER definitions are reordered
alphabetically, and trailing whitespace after DEBUG_VFS_INODE_CREATE is removed.
trunk/kernel/libk/grdxt.c
Changes from r635 to r656:

Fixed in grdxt_get_first(): the reconstructed key now shifts <ix2> by <w3>
(it was wrongly shifted by <w1>):

    *found_key = (ix1 << (w2+w3)) | (ix2 << w3) | ix3;

Added in grdxt_remote_insert(): DEBUG_GRDXT_INSERT instrumentation after each step
(entry, widths, computed indexes, ptr1, current and updated ptr2 and ptr3, and the
final value registration). For example, on entry and at the final store:

#if DEBUG_GRDXT_INSERT
uint32_t cycle = (uint32_t)hal_get_cycles();
if(DEBUG_GRDXT_INSERT < cycle)
printk("\n[%s] enter / rt_xp (%x,%x) / key %x / value %x\n",
__FUNCTION__, rt_cxy, rt_ptr, key, (intptr_t)value );
#endif

...

    // register value in level_3 array
    hal_remote_spt( XPTR( rt_cxy , &ptr3[ix3] ) , value );

#if DEBUG_GRDXT_INSERT
if(DEBUG_GRDXT_INSERT < cycle)
printk("\n[%s] update ptr3[%d] : &ptr3[%d] %x / value %x\n",
__FUNCTION__, ix3, ix3, &ptr3[ix3], value );
#endif

Changed in grdxt_remote_display(): the <ptr1>, <ptr2>, <ptr3> variables are declared
once at function entry, and each displayed entry now also prints the three level
pointers:

    uint32_t key = (ix1<<(w2+w3)) + (ix2<<w3) + ix3;
    printk(" - key = %x / value = %x / ptr1 = %x / ptr2 = %x / ptr3 = %x\n",
    key, (intptr_t)value, (intptr_t)ptr1, (intptr_t)ptr2, (intptr_t)ptr3 );
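The (w1, w2, w3) key split used by the radix tree, and the recomposition that r656
fixes, are worth seeing in isolation. A minimal sketch with illustrative widths
(the struct and function names below are not kernel identifiers):

#include <assert.h>
#include <stdint.h>

// Split a key into three indexes for a 3-level radix tree with
// level widths w1/w2/w3 (in bits), as grdxt does.
typedef struct { uint32_t ix1, ix2, ix3; } gidx_t;

static gidx_t key_split( uint32_t key, uint32_t w1, uint32_t w2, uint32_t w3 )
{
    gidx_t g;
    assert( (key >> (w1 + w2 + w3)) == 0 );     // key must fit in w1+w2+w3 bits
    g.ix1 = key >> (w2 + w3);                   // index in level 1 array
    g.ix2 = (key >> w3) & ((1 << w2) - 1);      // index in level 2 array
    g.ix3 = key & ((1 << w3) - 1);              // index in level 3 array
    return g;
}

static uint32_t key_join( gidx_t g, uint32_t w2, uint32_t w3 )
{
    // the r656 fix: ix2 is shifted by w3, not by w1
    return (g.ix1 << (w2 + w3)) | (g.ix2 << w3) | g.ix3;
}

int main( void )
{
    gidx_t g = key_split( 0x12345, 8, 6, 6 );
    assert( key_join( g, 6, 6 ) == 0x12345 );   // round trip succeeds
    return 0;
}

With the old (ix2 << w1) shift, key_join() would only round-trip when w1 happened
to equal w3, which is why grdxt_get_first() could return wrong keys before this fix.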
trunk/kernel/libk/grdxt.h
Changes from r635 to r656:

Changed: in the grdxt_init(), grdxt_destroy(), grdxt_insert(), grdxt_remove(), and
grdxt_get_first() comments, the sentence "It must be called by a local thread." is
moved after the functional description instead of splitting it. The scan description
of grdxt_get_first() now refers to its <start_key> argument (instead of <key>), and
its return line reads:

 * @ return pointer on first valid item if found / return NULL if no item found.
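The "first item with key >= start_key" scan documented here follows directly from
the key decomposition. A sketch of the same scan over a toy two-level table (two
levels instead of the kernel's three, purely to keep it short; all names are
illustrative):

#include <stdint.h>
#include <stdio.h>

// toy 2-level radix table: 4-bit ix1, 4-bit ix2
#define W1 4
#define W2 4

typedef struct { void ** root[1 << W1]; } toy_rt_t;

// return the first registered item with key >= start_key, and its key
static void * toy_get_first( toy_rt_t * rt, uint32_t start_key, uint32_t * found_key )
{
    for( uint32_t ix1 = start_key >> W2 ; ix1 < (1u << W1) ; ix1++ )
    {
        void ** lvl2 = rt->root[ix1];
        if( lvl2 == NULL ) continue;                 // empty subtree : skip it

        // only restrict ix2 inside the subtree containing start_key
        uint32_t ix2 = (ix1 == (start_key >> W2)) ? (start_key & ((1u << W2) - 1)) : 0;
        for( ; ix2 < (1u << W2) ; ix2++ )
        {
            if( lvl2[ix2] != NULL )
            {
                *found_key = (ix1 << W2) | ix2;      // recompose the key
                return lvl2[ix2];
            }
        }
    }
    return NULL;
}

int main( void )
{
    static toy_rt_t rt;
    static void * lvl2[1 << W2];
    int item = 7;
    rt.root[2] = lvl2;
    lvl2[5]    = &item;                              // registered at key 0x25

    uint32_t k;
    void * p = toy_get_first( &rt, 0x10, &k );
    printf("found key %x (expect 25), item %d\n", k, p ? *(int *)p : -1);
    return 0;
}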
trunk/kernel/libk/list.h
Changes from r651 to r656:

Changed: the file header comment becomes "list.h - Local double circular linked
list, using local pointers." and the list_first() macro comment now reads "This
macro returns a pointer on the first element of a list."

Changed in list_add_first() / list_add_last(): the local variable <next> is renamed
<first>, and <pred> is renamed <last>, to match what they actually point to:

static inline void list_add_first( list_entry_t * root,
                                   list_entry_t * entry )
{
    list_entry_t * first = root->next;

    entry->next = first;
    entry->pred = root;

    root->next  = entry;
    first->pred = entry;
}

static inline void list_add_last( list_entry_t * root,
                                  list_entry_t * entry )
{
    list_entry_t * last = root->pred;

    entry->next = root;
    entry->pred = last;

    root->pred = entry;
    last->next = entry;
}

The same renaming is applied to list_remote_add_first() / list_remote_add_last()
(using hal_remote_lpt() / hal_remote_spt() accesses), and the list_remote_unlink()
comment now describes its argument as a "local pointer on the remote entry to be
removed".
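The renamed variables make the list discipline explicit: the root's <next> field
is the first element, its <pred> field is the last. A standalone sketch of the same
discipline, reimplemented minimally so it runs outside the kernel (the kernel
versions are identical in structure, plus remote-access variants):

#include <assert.h>
#include <stddef.h>

typedef struct list_entry_s
{
    struct list_entry_s * next;
    struct list_entry_s * pred;
} list_entry_t;

static void list_root_init( list_entry_t * root )
{
    root->next = root;      // empty list : root points to itself
    root->pred = root;
}

// same discipline as the kernel's list_add_first()
static void list_add_first( list_entry_t * root, list_entry_t * entry )
{
    list_entry_t * first = root->next;
    entry->next = first;
    entry->pred = root;
    root->next  = entry;
    first->pred = entry;
}

// same discipline as the kernel's list_add_last()
static void list_add_last( list_entry_t * root, list_entry_t * entry )
{
    list_entry_t * last = root->pred;
    entry->next = root;
    entry->pred = last;
    root->pred  = entry;
    last->next  = entry;
}

int main( void )
{
    list_entry_t root, a, b;
    list_root_init( &root );
    list_add_first( &root, &a );                     // list : a
    list_add_last ( &root, &b );                     // list : a, b
    assert( root.next == &a && root.pred == &b );    // first and last
    assert( a.next == &b && b.next == &root );       // circular chaining
    return 0;
}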
trunk/kernel/libk/xlist.h
r636 r656 1 1 /* 2 * xlist.h - Double Circular Linked lists, using extended pointers.2 * xlist.h - Trans-cluster double circular linked list, using extended pointers. 3 3 * 4 4 * Author : Alain Greiner (2016,2017,2018,2019) -
trunk/kernel/mm/kcm.c
r635 r656 509 509 { 510 510 // get one 4 Kbytes page from remote PPM 511 page_t * page= ppm_remote_alloc_pages( kcm_cxy , 0 );512 513 if( page ==NULL )511 xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 ); 512 513 if( page_xp == XPTR_NULL ) 514 514 { 515 515 printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n", … … 519 519 } 520 520 521 // get remote page base address522 xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ));521 // get extended pointer on allocated buffer 522 xptr_t base_xp = ppm_page2base( page_xp ); 523 523 524 524 // get local pointer on kcm_page … … 529 529 hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 ); 530 530 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm ) , kcm_ptr ); 531 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page ) , page);531 hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page ) , GET_PTR( page_xp ) ); 532 532 533 533 // introduce new page in remote KCM active_list -
trunk/kernel/mm/kmem.c
r635 r656 45 45 flags = req->flags; 46 46 47 ////////////////////// //////////// PPM47 ////////////////////// 48 48 if( type == KMEM_PPM ) 49 49 { … … 76 76 return ptr; 77 77 } 78 /////////////////////////// ////////// KCM78 /////////////////////////// 79 79 else if( type == KMEM_KCM ) 80 80 { … … 102 102 return ptr; 103 103 } 104 /////////////////////////// ///////// KHM104 /////////////////////////// 105 105 else if( type == KMEM_KHM ) 106 106 { … … 140 140 uint32_t type = req->type; 141 141 142 ////////////////////// 142 143 if( type == KMEM_PPM ) 143 144 { … … 146 147 ppm_free_pages( page ); 147 148 } 149 /////////////////////////// 148 150 else if( type == KMEM_KCM ) 149 151 { 150 152 kcm_free( req->ptr ); 151 153 } 154 /////////////////////////// 152 155 else if( type == KMEM_KHM ) 153 156 { … … 172 175 flags = req->flags; 173 176 174 ////////////////////// /////////// PPM175 if( type == KMEM_PPM ) 176 { 177 // allocate the number of requested pages 178 page_t * page_ptr= ppm_remote_alloc_pages( cxy , order );179 180 if( page_ ptr ==NULL )177 ////////////////////// 178 if( type == KMEM_PPM ) 179 { 180 // allocate the number of requested pages from remote cluster 181 xptr_t page_xp = ppm_remote_alloc_pages( cxy , order ); 182 183 if( page_xp == XPTR_NULL ) 181 184 { 182 185 printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n", … … 185 188 } 186 189 187 xptr_t page_xp = XPTR( cxy , page_ptr ); 188 189 // get pointer on buffer from the page descriptor 190 // get extended pointer on remote buffer 190 191 xptr_t base_xp = ppm_page2base( page_xp ); 191 192 … … 193 194 if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE ); 194 195 195 void * ptr = GET_PTR( base_xp );196 196 197 197 #if DEBUG_KMEM_REMOTE … … 201 201 printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n", 202 202 __FUNCTION__, this->process->pid, this->trdid, 203 1<<order, ppm_page2ppn( XPTR(local_cxy,ptr)), cxy, cycle );204 #endif 205 return ptr;206 } 207 /////////////////////////// //////// KCM203 1<<order, ppm_page2ppn( page_xp ), cxy, cycle ); 204 #endif 205 return GET_PTR( base_xp ); 206 } 207 /////////////////////////// 208 208 else if( type == KMEM_KCM ) 209 209 { … … 231 231 return ptr; 232 232 } 233 /////////////////////////// //////// KHM233 /////////////////////////// 234 234 else if( type == KMEM_KHM ) 235 235 { … … 250 250 uint32_t type = req->type; 251 251 252 ////////////////////// 252 253 if( type == KMEM_PPM ) 253 254 { … … 256 257 ppm_remote_free_pages( cxy , page ); 257 258 } 259 /////////////////////////// 258 260 else if( type == KMEM_KCM ) 259 261 { 260 262 kcm_remote_free( cxy , req->ptr ); 261 263 } 264 /////////////////////////// 262 265 else if( type == KMEM_KHM ) 263 266 { -
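With this restructuring, the KMEM_PPM path of kmem_remote_alloc() returns a pointer that is local to the target cluster, derived from the page descriptor. A hedged usage sketch, based only on the request fields and calls visible in this hunk:

    kmem_req_t req;
    req.type  = KMEM_PPM;     // physical pages allocator
    req.order = 0;            // one single 4 Kbytes page
    req.flags = AF_ZERO;      // the buffer must be zeroed

    // returns a pointer that is only valid inside cluster <cxy>
    void * base = kmem_remote_alloc( cxy , &req );

    if( base == NULL )
    {
        // handle allocation failure
    }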
trunk/kernel/mm/kmem.h
r635 r656 29 29 30 30 /************************************************************************************* 31 * This enum defines the three Kernel Memory Allocator types :31 * This enum defines the three Kernel Memory Allocator types 32 32 ************************************************************************************/ 33 33 … … 71 71 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes, 72 72 * M can have any value, and req.order = M. 73 * 74 * WARNING: the physical memory allocated with a given allocator type must be 75 * released using the same allocator type. 73 76 ************************************************************************************* 74 77 * @ cxy : target cluster identifier for a remote access. -
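A minimal sketch of the pairing rule stated in the WARNING above, assuming the local kmem_alloc()/kmem_free() entry points take the same request descriptor as the remote ones shown in kmem.c (the KCM order encoding is an assumption here):

    kmem_req_t req;
    req.type  = KMEM_KCM;        // kernel cache allocator
    req.order = 6;               // assumed: ln2( block size in bytes )
    req.flags = AF_ZERO;

    void * ptr = kmem_alloc( &req );

    /* ... use the buffer ... */

    req.type = KMEM_KCM;         // must be released with the same allocator type
    req.ptr  = ptr;
    kmem_free( &req );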
trunk/kernel/mm/mapper.c
r651 r656 27 27 #include <hal_special.h> 28 28 #include <hal_uspace.h> 29 #include <hal_vmm.h> 29 30 #include <grdxt.h> 30 31 #include <string.h> … … 141 142 error_t error; 142 143 144 uint32_t inode_size; 145 uint32_t inode_type; 146 143 147 thread_t * this = CURRENT_THREAD; 144 148 145 149 // get target mapper cluster and local pointer 146 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 147 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 150 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 151 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 152 153 // get inode pointer 154 vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 155 156 // get inode size and type if relevant 157 if( inode != NULL ) 158 { 159 inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) ); 160 inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) ); 161 } 162 else 163 { 164 inode_size = 0; 165 inode_type = 0; 166 } 148 167 149 168 #if DEBUG_MAPPER_HANDLE_MISS 150 169 uint32_t cycle = (uint32_t)hal_get_cycles(); 151 170 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 152 vfs_inode_t * inode = mapper->inode;153 171 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 154 172 { 155 vfs_inode_get_name( XPTR( local_cxy , inode ) , name );156 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / c luster %x / cycle %d",173 vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name ); 174 printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cxy %x / cycle %d\n", 157 175 __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle ); 158 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );159 176 } 160 177 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 161 178 { 162 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / c luster %x / cycle %d",179 printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cxy %x / cycle %d\n", 163 180 __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle ); 164 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" ); 181 } 182 #endif 183 184 #if( DEBUG_MAPPER_HANDLE_MISS & 2 ) 185 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 186 { 187 if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name ); 188 else grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" ); 165 189 } 166 190 #endif 167 191 168 192 // allocate one 4 Kbytes page from the remote mapper cluster 169 page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 ); 193 xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 ); 194 page_t * page_ptr = GET_PTR( page_xp ); 170 195 171 if( page_ ptr ==NULL )196 if( page_xp == XPTR_NULL ) 172 197 { 173 198 printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n", … … 176 201 } 177 202 178 // build extended pointer on new page descriptor179 xptr_t page_xp = XPTR( mapper_cxy , page_ptr );180 181 203 // initialize the page descriptor 182 204 page_remote_init( page_xp ); 183 205 206 // initialize specific page descriptor fields 184 207 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1 ); 185 208 hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index ) , page_id ); … … 200 223 } 201 224 202 // launch I/O operation to load page from IOC device to mapper 203 error = vfs_fs_move_page( page_xp , IOC_SYNC_READ ); 204 205 if( error ) 206 { 207 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 208 __FUNCTION__ , this->process->pid, this->trdid ); 209 
mapper_remote_release_page( mapper_xp , page_ptr ); 210 return -1; 225 // launch I/O operation to load page from IOC device when required: 226 // - it is the FAT mapper 227 // - it is a directory mapper 228 // - it is a file mapper, and it exist data on IOC device for this page 229 if( (inode == NULL) || (inode_type == INODE_TYPE_DIR) || (inode_size > (page_id << 10) ) ) 230 { 231 error = vfs_fs_move_page( page_xp , IOC_SYNC_READ ); 232 233 if( error ) 234 { 235 printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n", 236 __FUNCTION__ , this->process->pid, this->trdid ); 237 mapper_remote_release_page( mapper_xp , page_ptr ); 238 return -1; 239 } 211 240 } 212 241 … … 215 244 216 245 #if DEBUG_MAPPER_HANDLE_MISS 217 cycle = (uint32_t)hal_get_cycles();246 ppn_t ppn = ppm_page2ppn( page_xp ); 218 247 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) ) 219 248 { 220 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d", 221 __FUNCTION__, this->process->pid, this->trdid, 222 page_id, name, ppm_page2ppn( page_xp ), cycle ); 223 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name ); 249 printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / page %x / ppn %x\n", 250 __FUNCTION__, this->process->pid, this->trdid, page_id, name, page_ptr, ppn ); 224 251 } 225 252 if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) ) 226 253 { 227 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d", 228 __FUNCTION__, this->process->pid, this->trdid, 229 page_id, ppm_page2ppn( page_xp ), cycle ); 230 if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" ); 254 printk("\n[%s] thread[%x,%x] exit for page %d in FAT / page %x / ppn %x\n", 255 __FUNCTION__, this->process->pid, this->trdid, page_id, page_ptr, ppn ); 256 } 257 #endif 258 259 #if( DEBUG_MAPPER_HANDLE_MISS & 2 ) 260 if( DEBUG_MAPPER_HANDLE_MISS < cycle ) 261 { 262 if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name ); 263 else grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" ); 231 264 } 232 265 #endif … … 241 274 { 242 275 error_t error; 243 mapper_t * mapper_ptr;244 cxy_t mapper_cxy;245 xptr_t lock_xp; // extended pointer on mapper lock246 xptr_t page_xp; // extended pointer on searched page descriptor247 xptr_t rt_xp; // extended pointer on radix tree in mapper248 276 249 277 thread_t * this = CURRENT_THREAD; 250 278 251 279 // get mapper cluster and local pointer 252 mapper_ ptr = GET_PTR( mapper_xp );253 mapper_cxy = GET_CXY( mapper_xp );280 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 281 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 254 282 255 283 #if DEBUG_MAPPER_GET_PAGE … … 270 298 #endif 271 299 300 #if( DEBUG_MAPPER_GET_PAGE & 2 ) 301 if( DEBUG_MAPPER_GET_PAGE < cycle ) 302 ppm_remote_display( local_cxy ); 303 #endif 304 272 305 // check thread can yield 273 306 thread_assert_can_yield( this , __FUNCTION__ ); 274 307 275 308 // build extended pointer on mapper lock and mapper rt 276 lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock );277 rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt );309 xptr_t lock_xp = XPTR( mapper_cxy , &mapper_ptr->lock ); 310 xptr_t rt_xp = XPTR( mapper_cxy , &mapper_ptr->rt ); 278 311 279 312 // take mapper lock in READ_MODE … … 281 314 282 315 // search page in radix tree 283 page_xp = grdxt_remote_lookup( rt_xp , page_id );316 xptr_t page_xp = grdxt_remote_lookup( rt_xp , page_id ); 284 317 285 318 // test mapper miss … … 310 343 311 344 #if 
(DEBUG_MAPPER_GET_PAGE & 1) 312 if( DEBUG_MAPPER_GET_PAGE < cycle ) 313 printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n", 314 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) ); 345 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) 346 { 347 printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n", 348 __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) ); 349 } 350 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) 351 { 352 printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n", 353 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) ); 354 } 315 355 #endif 316 356 … … 328 368 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) ) 329 369 { 330 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n", 331 __FUNCTION__, this->process->pid, this->trdid, page_id, 332 name, ppm_page2ppn(page_xp), cycle ); 370 printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n", 371 __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) ); 333 372 } 334 373 if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) ) 335 374 { 336 printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x / cycle %d\n", 337 __FUNCTION__, this->process->pid, this->trdid, page_id, 338 ppm_page2ppn(page_xp), cycle ); 339 } 375 printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper / ppn %x\n", 376 __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) ); 377 } 378 #endif 379 380 #if( DEBUG_MAPPER_GET_PAGE & 2) 381 if( DEBUG_MAPPER_GET_PAGE < cycle ) 382 ppm_remote_display( local_cxy ); 340 383 #endif 341 384 … … 476 519 __FUNCTION__, this->process->pid, this->trdid, page_bytes, 477 520 local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) ); 478 mapper_display_page( mapper_xp , page_ id, 128 );521 mapper_display_page( mapper_xp , page_xp , 128 ); 479 522 #endif 480 523 … … 600 643 { 601 644 if( to_buffer ) 602 printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",645 printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n", 603 646 __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes ); 604 647 else 605 printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",648 printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n", 606 649 __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes ); 607 650 } … … 617 660 cycle = (uint32_t)hal_get_cycles(); 618 661 if( DEBUG_MAPPER_MOVE_KERNEL < cycle ) 619 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",620 __FUNCTION__, this->process->pid, this->trdid, cycle );662 printk("\n[%s] thread[%x,%x] exit / mapper <%s> / buffer (%x,%x) / cycle %d\n", 663 __FUNCTION__, this->process->pid, this->trdid, name, buffer_cxy, buffer_ptr, cycle ); 621 664 #endif 622 665 … … 707 750 if( page == NULL ) break; 708 751 709 assert( (page->index == found_key ), " wrong page descriptor index");710 assert( (page->order == 0), "mapper page order must be 0");752 assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key ); 753 assert( (page->order == 0), "page_order (%d] != 0", page->order ); 711 754 712 755 // build extended pointer on page descriptor … … 753 796 } // end mapper_sync() 754 797 755 ////////////////////////////////////////////////// 756 error_t mapper_display_page( xptr_t mapper_xp, 757 uint32_t page_id, 758 uint32_t nbytes ) 759 { 760 xptr_t page_xp; // extended pointer 
on page descriptor 761 xptr_t base_xp; // extended pointer on page base 798 /////////////////////////////////////////////// 799 void mapper_display_page( xptr_t mapper_xp, 800 xptr_t page_xp, 801 uint32_t nbytes ) 802 { 762 803 char buffer[4096]; // local buffer 763 uint32_t * tabi; // pointer on uint32_t to scan buffer764 804 uint32_t line; // line index 765 805 uint32_t word; // word index 766 cxy_t mapper_cxy; // mapper cluster identifier767 mapper_t * mapper_ptr; // mapper local pointer768 vfs_inode_t * inode_ptr; // inode local pointer769 806 770 807 char name[CONFIG_VFS_MAX_NAME_LENGTH]; 771 808 772 if( nbytes > 4096) 773 { 774 printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n", 775 __FUNCTION__, nbytes ); 776 return -1; 777 } 778 779 // get extended pointer on page descriptor 780 page_xp = mapper_remote_get_page( mapper_xp , page_id ); 781 782 if( page_xp == XPTR_NULL) 783 { 784 printk("\n[ERROR] in %s : cannot access page %d in mapper\n", 785 __FUNCTION__, page_id ); 786 return -1; 787 } 788 789 // get cluster and local pointer 790 mapper_cxy = GET_CXY( mapper_xp ); 791 mapper_ptr = GET_PTR( mapper_xp ); 809 assert( (nbytes <= 4096) , "nbytes cannot be larger than 4096"); 810 assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null"); 811 assert( (page_xp != XPTR_NULL) , "page_xp argument cannot be null"); 812 813 // get mapper cluster and local pointer 814 cxy_t mapper_cxy = GET_CXY( mapper_xp ); 815 mapper_t * mapper_ptr = GET_PTR( mapper_xp ); 816 817 // get page cluster an local pointer 818 cxy_t page_cxy = GET_CXY( page_xp ); 819 page_t * page_ptr = GET_PTR( page_xp ); 820 821 // get page_id and mapper from page descriptor 822 uint32_t page_id = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) ); 823 mapper_t * mapper = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) ); 824 825 assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster"); 826 assert( (mapper_ptr == mapper ) , "unconsistent mapper_xp & page_xp arguments"); 792 827 793 828 // get inode 794 inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );829 vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) ); 795 830 796 831 // get inode name 797 if( inode_ptr == NULL ) strcpy( name , " fat" );832 if( inode_ptr == NULL ) strcpy( name , "FAT" ); 798 833 else vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name ); 799 834 800 835 // get extended pointer on page base 801 base_xp = ppm_page2base( page_xp );836 xptr_t base_xp = ppm_page2base( page_xp ); 802 837 803 838 // copy remote page to local buffer 804 839 hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes ); 805 840 841 // display header 842 uint32_t * tabi = (uint32_t *)buffer; 843 printk("\n***** mapper <%s> / page_id %d / cxy %x / mapper %x / buffer %x\n", 844 name, page_id, mapper_cxy, mapper_ptr, GET_PTR( base_xp ) ); 845 806 846 // display 8 words per line 807 tabi = (uint32_t *)buffer;808 printk("\n***** mapper <%s> / %d bytes in page %d (%x,%x)\n",809 name, nbytes, page_id, GET_CXY(base_xp), GET_PTR(base_xp) );810 847 for( line = 0 ; line < (nbytes >> 5) ; line++ ) 811 848 { … … 815 852 } 816 853 817 return 0; 818 819 } // end mapper_display_page 820 821 854 } // end mapper_display_page() 855 856 -
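The miss handler above now launches an IOC read only in three cases. Restated as a stand-alone predicate, the demand-load policy reads as follows (a sketch mirroring the test used in this hunk, including its page_id << 10 byte-offset convention):

    // returns true when a missing mapper page must be read from the IOC device
    static inline bool_t mapper_must_load( vfs_inode_t * inode,
                                           uint32_t      inode_type,
                                           uint32_t      inode_size,
                                           uint32_t      page_id )
    {
        return ( (inode == NULL)                  ||   // FAT mapper
                 (inode_type == INODE_TYPE_DIR)   ||   // directory mapper
                 (inode_size > (page_id << 10)) );     // data exist on device
    }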
trunk/kernel/mm/mapper.h
r635 r656 62 62 * and the allocated memory is only released when the mapper/inode is destroyed. 63 63 * 64 * TODO (1) the mapper being only used to implement the VFS cache(s), the mapper.c 65 * and mapper.h file should be transferred to the fs directory. 66 * TODO (2) the "type" field in mapper descriptor is redundant and probably unused. 64 * TODO the "type" field in mapper descriptor is redundant and probably unused. 67 65 ******************************************************************************************/ 68 66 … … 161 159 162 160 /******************************************************************************************** 163 * This function moves data between a remote mapper, identified by the <mapper_xp> argument, 164 * and a localised remote kernel buffer. It can be called by a thread running in any cluster. 161 * This function moves <size> bytes from/to a remote mapper, identified by the <mapper_xp> 162 * argument, to/from a remote kernel buffer, identified by the <buffer_xp> argument. 163 * It can be called by a thread running in any cluster. 165 164 * If required, the data transfer is split in "fragments", where one fragment contains 166 * contiguous bytes in the same mapper page. 167 * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer. 168 * In case of write, the dirty bit is set for all pages written in the mapper. 165 * contiguous bytes in the same mapper page. Each fragment uses a "remote_memcpy". 166 * In case of write to mapper, the dirty bit is set for all pages written in the mapper. 169 167 ******************************************************************************************* 170 168 * @ mapper_xp : extended pointer on mapper. … … 248 246 249 247 /******************************************************************************************* 250 * This debug function displays the content of a given page of a given mapper. 251 * - the mapper is identified by the <mapper_xp> argument. 252 * - the page is identified by the <page_id> argument. 253 * - the number of bytes to display in page is defined by the <nbytes> argument. 248 * This debug function displays the content of a given page of a given mapper, identified 249 * by the <mapper_xp> and <page_xp> arguments. 250 * The number of bytes to display in page is defined by the <nbytes> argument. 254 251 * The format is eight (32 bits) words per line in hexadecimal. 255 252 * It can be called by any thread running in any cluster. 256 * In case of miss in mapper, it loads the missing page from device to mapper. 257 253 ******************************************************************************************* 258 254 * @ mapper_xp : [in] extended pointer on the mapper. 259 * @ page_id : [in] page index in file. 260 * @ nbytes : [in] value to be written. 261 * @ returns 0 if success / return -1 if error. 262 ******************************************************************************************/ 263 error_tmapper_display_page( xptr_t mapper_xp, 264 uint32_t page_id, 265 255 * @ page_xp : [in] extended pointer on page descriptor. 256 * @ nbytes : [in] number of bytes in page. 257 * @ returns 0 if success / return -1 if error. 258 ******************************************************************************************/ 259 void mapper_display_page( xptr_t mapper_xp, 260 xptr_t page_xp, 261 uint32_t nbytes ); 266 262 267 263 -
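The "fragment" splitting described above never lets a single copy cross a mapper page boundary. A small user-space model of that arithmetic, runnable as-is (PAGE_SIZE stands in for CONFIG_PPM_PAGE_SIZE; offsets and sizes are arbitrary examples):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096   // assumed CONFIG_PPM_PAGE_SIZE

    int main( void )
    {
        uint32_t offset = 4000;    // example starting byte offset in mapper
        uint32_t size   = 10000;   // example number of bytes to move

        while( size )
        {
            uint32_t page_id    = offset / PAGE_SIZE;    // mapper page index
            uint32_t page_off   = offset % PAGE_SIZE;    // offset in page
            uint32_t page_bytes = PAGE_SIZE - page_off;  // bytes left in page
            if( page_bytes > size ) page_bytes = size;

            printf("fragment: page %u / offset %u / %u bytes\n",
                   page_id, page_off, page_bytes);

            offset += page_bytes;
            size   -= page_bytes;
        }
        return 0;
    }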
trunk/kernel/mm/page.h
r635 r656 49 49 * - The remote_busylock is used to allow any remote thread to atomically 50 50 * test/modify the forks counter or the flags. 51 * - The list entry is used to register the page in a free list or in dirty list. 52 * The refcount is used for page release to KMEM. 51 * - The list field is used to register the page in a free list, or in dirty list, 52 * as a given page cannot be simultaneously dirty and free. 53 * - The refcount is used to release the page to the PPM. 53 54 * NOTE: the size is 48 bytes for a 32 bits core. 54 55 ************************************************************************************/ -
trunk/kernel/mm/ppm.c
r651 r656 151 151 page_t * buddy; // searched buddy page descriptor 152 152 uint32_t buddy_index; // buddy page index in page_tbl[] 153 page_t * current ;// current (merged) page descriptor153 page_t * current_ptr; // current (merged) page descriptor 154 154 uint32_t current_index; // current (merged) page index in page_tbl[] 155 155 uint32_t current_order; // current (merged) page order … … 168 168 169 169 // initialise loop variables 170 current 170 current_ptr = page; 171 171 current_order = page->order; 172 172 current_index = page - ppm->pages_tbl; … … 191 191 buddy->order = 0; 192 192 193 // compute next (merged) page index in page_tbl[]193 // compute next values for loop variables 194 194 current_index &= buddy_index; 195 196 // compute next (merged) page order197 195 current_order++; 198 199 // compute next (merged) page descripror 200 current = pages_tbl + current_index; 196 current_ptr = pages_tbl + current_index; 201 197 } 202 198 203 199 // update order field for merged page descriptor 204 current ->order = current_order;200 current_ptr->order = current_order; 205 201 206 202 // insert merged page in relevant free list 207 list_add_first( &ppm->free_pages_root[current_order] , ¤t ->list );203 list_add_first( &ppm->free_pages_root[current_order] , ¤t_ptr->list ); 208 204 ppm->free_pages_nr[current_order] ++; 209 205 210 206 } // end ppm_free_pages_nolock() 211 212 207 213 208 //////////////////////////////////////////// … … 221 216 thread_t * this = CURRENT_THREAD; 222 217 218 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 219 223 220 #if DEBUG_PPM_ALLOC_PAGES 224 221 uint32_t cycle = (uint32_t)hal_get_cycles(); 225 222 #endif 226 223 227 #if (DEBUG_PPM_ALLOC_PAGES & 1)224 #if DEBUG_PPM_ALLOC_PAGES 228 225 if( DEBUG_PPM_ALLOC_PAGES < cycle ) 229 226 { 230 227 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n", 231 228 __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle ); 232 ppm_remote_display( local_cxy ); 233 } 234 #endif 235 236 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 229 if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy ); 230 } 231 #endif 237 232 238 233 // check order … … 316 311 dqdt_increment_pages( local_cxy , order ); 317 312 313 hal_fence(); 314 318 315 #if DEBUG_PPM_ALLOC_PAGES 319 316 if( DEBUG_PPM_ALLOC_PAGES < cycle ) … … 322 319 __FUNCTION__, this->process->pid, this->trdid, 323 320 1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle ); 324 ppm_remote_display( local_cxy );321 if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy ); 325 322 } 326 323 #endif … … 340 337 #endif 341 338 342 #if ( DEBUG_PPM_FREE_PAGES & 1 )339 #if DEBUG_PPM_FREE_PAGES 343 340 if( DEBUG_PPM_FREE_PAGES < cycle ) 344 341 { … … 346 343 __FUNCTION__, this->process->pid, this->trdid, 347 344 1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle ); 348 ppm_remote_display( local_cxy ); 345 if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy ); 346 } 349 347 #endif 350 348 … … 362 360 // update DQDT 363 361 dqdt_decrement_pages( local_cxy , page->order ); 362 363 hal_fence(); 364 364 365 365 #if DEBUG_PPM_FREE_PAGES … … 369 369 __FUNCTION__, this->process->pid, this->trdid, 370 370 1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle ); 371 ppm_remote_display( local_cxy );371 if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy ); 372 372 } 373 373 #endif … … 376 376 377 377 378 379 380 378 ///////////////////////////////////////////// 381 void 
*ppm_remote_alloc_pages( cxy_t cxy,379 xptr_t ppm_remote_alloc_pages( cxy_t cxy, 382 380 uint32_t order ) 383 381 { … … 389 387 thread_t * this = CURRENT_THREAD; 390 388 389 // check order 390 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 391 392 // get local pointer on PPM (same in all clusters) 393 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 394 391 395 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 392 396 uint32_t cycle = (uint32_t)hal_get_cycles(); 393 397 #endif 394 398 395 #if ( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 )399 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 396 400 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle ) 397 401 { 398 printk("\n[%s] thread[%x,%x] enter for %d smallpage(s) in cluster %x / cycle %d\n",402 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n", 399 403 __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle ); 400 ppm_remote_display( cxy ); 401 } 402 #endif 403 404 // check order 405 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order ); 406 407 // get local pointer on PPM (same in all clusters) 408 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 404 if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy ); 405 } 406 #endif 409 407 410 408 //build extended pointer on lock protecting remote PPM … … 489 487 dqdt_increment_pages( cxy , order ); 490 488 489 hal_fence(); 490 491 491 #if DEBUG_PPM_REMOTE_ALLOC_PAGES 492 492 if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle ) … … 495 495 __FUNCTION__, this->process->pid, this->trdid, 496 496 1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle ); 497 ppm_remote_display( cxy );498 } 499 #endif 500 501 return found_block;497 if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy ); 498 } 499 #endif 500 501 return XPTR( cxy , found_block ); 502 502 503 503 } // end ppm_remote_alloc_pages() … … 515 515 uint32_t current_order; // current (merged) page order 516 516 517 // get local pointer on PPM (same in all clusters) 518 ppm_t * ppm = &LOCAL_CLUSTER->ppm; 519 520 // get page ppn and order 521 uint32_t order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) ); 522 517 523 #if DEBUG_PPM_REMOTE_FREE_PAGES 518 524 thread_t * this = CURRENT_THREAD; 519 525 uint32_t cycle = (uint32_t)hal_get_cycles(); 520 #endif 521 522 #if ( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) 526 ppn_t ppn = ppm_page2ppn( XPTR( page_cxy , page_ptr ) ); 527 #endif 528 529 #if DEBUG_PPM_REMOTE_FREE_PAGES 523 530 if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle ) 524 531 { 525 532 printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n", 526 __FUNCTION__, this->process->pid, this->trdid, 527 1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr )), cycle ); 528 ppm_remote_display( page_cxy ); 533 __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle ); 534 if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy ); 529 535 } 530 536 #endif … … 533 539 page_xp = XPTR( page_cxy , page_ptr ); 534 540 535 // get local pointer on PPM (same in all clusters)536 ppm_t * ppm = &LOCAL_CLUSTER->ppm;537 538 541 // build extended pointer on lock protecting remote PPM 539 542 xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock ); … … 556 559 // initialise loop variables 557 560 current_ptr = page_ptr; 558 current_order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );561 current_order = order; 559 562 current_index = page_ptr - ppm->pages_tbl; 560 563 … … 582 585 hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 ); 583 586 
584 // compute next (merged) page index in page_tbl[]587 // compute next values for loop variables 585 588 current_index &= buddy_index; 586 587 // compute next (merged) page order588 589 current_order++; 589 590 // compute next (merged) page descripror591 590 current_ptr = pages_tbl + current_index; 592 591 … … 594 593 595 594 // update current (merged) page descriptor order field 596 current_ptr = pages_tbl + current_index;597 595 hal_remote_s32( XPTR( page_cxy , ¤t_ptr->order ) , current_order ); 598 596 599 597 // insert current (merged) page into relevant free list 600 list_remote_add_first( page_cxy , &ppm->free_pages_root[current_order], ¤t_ptr->list );598 list_remote_add_first( page_cxy, &ppm->free_pages_root[current_order], ¤t_ptr->list ); 601 599 hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 ); 602 600 … … 607 605 dqdt_decrement_pages( page_cxy , page_ptr->order ); 608 606 607 hal_fence(); 608 609 609 #if DEBUG_PPM_REMOTE_FREE_PAGES 610 610 if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle ) 611 611 { 612 612 printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n", 613 __FUNCTION__, this->process->pid, this->trdid, 614 1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ), cycle ); 615 ppm_remote_display( page_cxy ); 613 __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle ); 614 if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy ); 616 615 } 617 616 #endif … … 658 657 uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) ); 659 658 660 // display directfree_list[order]661 nolock_printk("- forward : order = %d / n = %d \t: ", order , n );659 // display forward free_list[order] 660 nolock_printk("- forward : order = %d / n = %d : ", order , n ); 662 661 LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter ) 662 { 663 page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) ); 664 nolock_printk("%x," , ppm_page2ppn( page_xp ) ); 665 } 666 nolock_printk("\n"); 667 668 // display backward free_list[order] 669 nolock_printk("- backward : order = %d / n = %d : ", order , n ); 670 LIST_REMOTE_FOREACH_BACKWARD( cxy , &ppm->free_pages_root[order] , iter ) 663 671 { 664 672 page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) ); -
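The merge loop above relies on standard buddy arithmetic: the buddy of a block of order k is found by flipping bit k of its index, and the merged block keeps only the common high-order bits. An illustrative stand-alone example, runnable as-is (the XOR step is implied by the current_index &= buddy_index statement shown in this hunk):

    #include <stdio.h>
    #include <stdint.h>

    int main( void )
    {
        uint32_t index = 44;    // example block index in pages_tbl[]
        uint32_t order = 2;     // current block order (4 pages)

        uint32_t buddy_index  = index ^ (1 << order);   // 44 ^ 4  = 40
        uint32_t merged_index = index & buddy_index;    // 44 & 40 = 40

        printf("block %u (order %u) : buddy %u / merged block %u (order %u)\n",
               index, order, buddy_index, merged_index, order + 1);
        return 0;
    }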
trunk/kernel/mm/ppm.h
r635 r656 84 84 /***************************************************************************************** 85 85 * This local allocator must be called by a thread running in local cluster. 86 * It allocates ncontiguous physical 4 Kbytes pages from the local cluster, where87 * nis a power of 2 defined by the <order> argument.86 * It allocates N contiguous physical 4 Kbytes pages from the local cluster, where 87 * N is a power of 2 defined by the <order> argument. 88 88 * In normal use, it should not be called directly, as the recommended way to allocate 89 89 * physical pages is to call the generic allocator defined in kmem.h. … … 116 116 /***************************************************************************************** 117 117 * This remote allocator can be called by any thread running in any cluster. 118 * It allocates ncontiguous physical 4 Kbytes pages from cluster identified119 * by the <cxy> argument, where nis a power of 2 defined by the <order> argument.118 * It allocates N contiguous physical 4 Kbytes pages from cluster identified 119 * by the <cxy> argument, where N is a power of 2 defined by the <order> argument. 120 120 * In normal use, it should not be called directly, as the recommended way to allocate 121 121 * physical pages is to call the generic allocator defined in kmem.h. … … 123 123 * @ cxy : remote cluster identifier. 124 124 * @ order : ln2( number of 4 Kbytes pages) 125 * @ returns a local pointer on remotepage descriptor if success / XPTR_NULL if error.126 ****************************************************************************************/ 127 void *ppm_remote_alloc_pages( cxy_t cxy,125 * @ returns an extended pointer on page descriptor if success / XPTR_NULL if error. 126 ****************************************************************************************/ 127 xptr_t ppm_remote_alloc_pages( cxy_t cxy, 128 128 uint32_t order ); 129 129 -
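A hedged sketch of the new calling convention for ppm_remote_alloc_pages(), using only helpers that appear elsewhere in this changeset; note that GET_PTR() yields a pointer that is only dereferencable in cluster <cxy>, so remote accessors must be used from other clusters:

    // allocate one 4 Kbytes page in remote cluster <cxy>
    xptr_t page_xp = ppm_remote_alloc_pages( cxy , 0 );

    if( page_xp != XPTR_NULL )
    {
        page_t * page_ptr = GET_PTR( page_xp );         // local pointer in <cxy>
        xptr_t   base_xp  = ppm_page2base( page_xp );   // extended pointer on buffer
        ppn_t    ppn      = ppm_page2ppn( page_xp );    // physical page number
    }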
trunk/kernel/mm/vmm.c
r651 r656 1745 1745 1746 1746 //////////////////////////////////////////////////////////////////////////////////////////// 1747 // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions. 1748 // Depending on the vseg <type>, it decrements the physical page refcount, and 1749 // conditionnally release to the relevant kmem the physical page identified by <ppn>. 1747 // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions 1748 // to update the physical page descriptor identified by the <ppn> argument. 1749 // It decrements the refcount, set the dirty bit when required, and releases the physical 1750 // page to kmem depending on the vseg type. 1751 // - KERNEL : refcount decremented / not released to kmem / dirty bit not set 1752 // - FILE : refcount decremented / not released to kmem / dirty bit set when required. 1753 // - CODE : refcount decremented / released to kmem / dirty bit not set. 1754 // - STAK : refcount decremented / released to kmem / dirty bit not set. 1755 // - DATA : refcount decremented / released to kmem if ref / dirty bit not set. 1756 // - MMAP : refcount decremented / released to kmem if ref / dirty bit not set. 1750 1757 //////////////////////////////////////////////////////////////////////////////////////////// 1751 1758 // @ process : local pointer on process. 1752 1759 // @ vseg : local pointer on vseg. 1753 1760 // @ ppn : released pysical page index. 1761 // @ dirty : set the dirty bit in page descriptor when non zero. 1754 1762 //////////////////////////////////////////////////////////////////////////////////////////// 1755 1763 static void vmm_ppn_release( process_t * process, 1756 1764 vseg_t * vseg, 1757 ppn_t ppn ) 1765 ppn_t ppn, 1766 uint32_t dirty ) 1758 1767 { 1759 bool_t do_ release;1768 bool_t do_kmem_release; 1760 1769 1761 1770 // get vseg type 1762 1771 vseg_type_t type = vseg->type; 1763 1772 1764 // compute is_ref 1773 // compute is_ref <=> this vseg is the reference vseg 1765 1774 bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy); 1766 1775 … … 1774 1783 hal_remote_atomic_add( count_xp , -1 ); 1775 1784 1776 // compute the do_release condition depending on vseg type 1777 if( (type == VSEG_TYPE_FILE) || 1778 (type == VSEG_TYPE_KCODE) || 1785 // compute the do_kmem_release condition depending on vseg type 1786 if( (type == VSEG_TYPE_KCODE) || 1779 1787 (type == VSEG_TYPE_KDATA) || 1780 1788 (type == VSEG_TYPE_KDEV) ) 1781 1789 { 1782 // no physical page release for FILE and KERNEL 1783 do_release = false; 1784 } 1790 // no physical page release for KERNEL 1791 do_kmem_release = false; 1792 } 1793 else if( type == VSEG_TYPE_FILE ) 1794 { 1795 // no physical page release for KERNEL 1796 do_kmem_release = false; 1797 1798 // set dirty bit if required 1799 if( dirty ) ppm_page_do_dirty( page_xp ); 1800 } 1785 1801 else if( (type == VSEG_TYPE_CODE) || 1786 1802 (type == VSEG_TYPE_STACK) ) 1787 1803 { 1788 1804 // always release physical page for private vsegs 1789 do_ release = true;1805 do_kmem_release = true; 1790 1806 } 1791 1807 else if( (type == VSEG_TYPE_ANON) || … … 1793 1809 { 1794 1810 // release physical page if reference cluster 1795 do_ release = is_ref;1811 do_kmem_release = is_ref; 1796 1812 } 1797 1813 else if( is_ref ) // vseg_type == DATA in reference cluster … … 1814 1830 1815 1831 // release physical page if forks == 0 1816 do_ release = (forks == 0);1832 do_kmem_release = (forks == 0); 1817 1833 } 1818 1834 else // vseg_type == DATA not in reference cluster 
1819 1835 { 1820 1836 // no physical page release if not in reference cluster 1821 do_ release = false;1837 do_kmem_release = false; 1822 1838 } 1823 1839 1824 1840 // release physical page to relevant kmem when required 1825 if( do_release ) 1826 { 1827 ppm_remote_free_pages( page_cxy , page_ptr ); 1841 if( do_kmem_release ) 1842 { 1843 kmem_req_t req; 1844 req.type = KMEM_PPM; 1845 req.ptr = GET_PTR( ppm_ppn2base( ppn ) ); 1846 1847 kmem_remote_free( page_cxy , &req ); 1828 1848 1829 1849 #if DEBUG_VMM_PPN_RELEASE … … 1892 1912 hal_gpt_reset_pte( gpt_xp , vpn ); 1893 1913 1894 // release physical page when required1895 vmm_ppn_release( process , vseg , ppn );1914 // release physical page depending on vseg type 1915 vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY ); 1896 1916 } 1897 1917 } … … 1986 2006 1987 2007 // release physical page when required 1988 vmm_ppn_release( process , vseg , ppn );2008 vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY ); 1989 2009 } 1990 2010 } … … 2008 2028 2009 2029 // release physical page when required 2010 vmm_ppn_release( process , vseg , ppn );2030 vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY ); 2011 2031 } 2012 2032 } … … 2170 2190 // @ vseg : local pointer on vseg. 2171 2191 // @ vpn : unmapped vpn. 2172 // @ return an extended pointer on the allocated page 2192 // @ return an extended pointer on the allocated page descriptor. 2173 2193 ////////////////////////////////////////////////////////////////////////////////////// 2174 2194 static xptr_t vmm_page_allocate( vseg_t * vseg, … … 2186 2206 xptr_t page_xp; 2187 2207 cxy_t page_cxy; 2188 page_t * page_ptr;2189 2208 uint32_t index; 2190 2209 … … 2197 2216 assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" ); 2198 2217 2218 // compute target cluster identifier 2199 2219 if( flags & VSEG_DISTRIB ) // distributed => cxy depends on vpn LSB 2200 2220 { … … 2214 2234 2215 2235 // allocate one small physical page from target cluster 2216 page_ptr = ppm_remote_alloc_pages( page_cxy , 0 ); 2217 2218 page_xp = XPTR( page_cxy , page_ptr ); 2236 kmem_req_t req; 2237 req.type = KMEM_PPM; 2238 req.order = 0; 2239 req.flags = AF_ZERO; 2240 2241 // get local pointer on page base 2242 void * ptr = kmem_remote_alloc( page_cxy , &req ); 2243 2244 // get extended pointer on page descriptor 2245 page_xp = ppm_base2page( XPTR( page_cxy , ptr ) ); 2219 2246 2220 2247 #if DEBUG_VMM_PAGE_ALLOCATE … … 2245 2272 uint32_t cycle = (uint32_t)hal_get_cycles(); 2246 2273 thread_t * this = CURRENT_THREAD; 2247 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b))2248 printk("\n[%s] thread[%x,%x] enter for vpn %x / type%s / page_id %d / cycle %d\n",2274 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2275 printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id %d / cycle %d\n", 2249 2276 __FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle ); 2277 #endif 2278 2279 #if (DEBUG_VMM_GET_ONE_PPN & 2) 2280 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2281 hal_vmm_display( XPTR( local_cxy , this->process ) , true ); 2250 2282 #endif 2251 2283 … … 2291 2323 2292 2324 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 2293 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b))2325 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2294 2326 printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n", 2295 2327 __FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset ); … … 2305 2337 2306 2338 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 2307 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b))2339 if( 
DEBUG_VMM_GET_ONE_PPN < cycle ) 2308 2340 printk("\n[%s] thread[%x,%x] for vpn %x / fully in BSS\n", 2309 2341 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 2322 2354 2323 2355 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 2324 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b))2356 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2325 2357 printk("\n[%s] thread[%x,%x] for vpn %x / fully in mapper\n", 2326 2358 __FUNCTION__, this->process->pid, this->trdid, vpn ); … … 2339 2371 2340 2372 #if (DEBUG_VMM_GET_ONE_PPN & 0x1) 2341 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b))2373 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2342 2374 printk("\n[%s] thread[%x,%x] for vpn %x / both mapper & BSS\n" 2343 2375 " %d bytes from mapper / %d bytes from BSS\n", … … 2365 2397 } 2366 2398 } 2367 } // end initialisation for CODE or DATA types 2399 2400 } // end if CODE or DATA types 2368 2401 } 2369 2402 … … 2372 2405 2373 2406 #if DEBUG_VMM_GET_ONE_PPN 2374 cycle = (uint32_t)hal_get_cycles(); 2375 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) ) 2407 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2376 2408 printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n", 2377 2409 __FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle ); 2410 #endif 2411 2412 #if (DEBUG_VMM_GET_ONE_PPN & 2) 2413 if( DEBUG_VMM_GET_ONE_PPN < cycle ) 2414 hal_vmm_display( XPTR( local_cxy , this->process ) , true ); 2378 2415 #endif 2379 2416 … … 2404 2441 2405 2442 #if DEBUG_VMM_HANDLE_PAGE_FAULT 2406 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) & &(vpn > 0) )2443 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) & (vpn > 0) ) 2407 2444 printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n", 2408 2445 __FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle ); 2409 2446 #endif 2410 2447 2411 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)2448 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) 2412 2449 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2413 hal_vmm_display( this->process, true );2450 hal_vmm_display( XPTR( local_cxy , this->process ) , true ); 2414 2451 #endif 2415 2452 … … 2504 2541 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) 2505 2542 uint32_t end_cycle = (uint32_t)hal_get_cycles(); 2506 uint32_t cost = end_cycle - start_cycle;2507 2543 #endif 2508 2544 … … 2513 2549 #endif 2514 2550 2551 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) 2552 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2553 hal_vmm_display( XPTR( local_cxy , this->process ) , true ); 2554 #endif 2555 2515 2556 #if CONFIG_INSTRUMENTATION_PGFAULTS 2557 uint32_t cost = end_cycle - start_cycle; 2516 2558 this->info.local_pgfault_nr++; 2517 2559 this->info.local_pgfault_cost += cost; … … 2584 2626 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) 2585 2627 uint32_t end_cycle = (uint32_t)hal_get_cycles(); 2586 uint32_t cost = end_cycle - start_cycle;2587 2628 #endif 2588 2629 … … 2593 2634 #endif 2594 2635 2636 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) 2637 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2638 hal_vmm_display( XPTR( local_cxy , this->process ) , true ); 2639 #endif 2640 2595 2641 #if CONFIG_INSTRUMENTATION_PGFAULTS 2642 uint32_t cost = end_cycle - start_cycle; 2596 2643 this->info.false_pgfault_nr++; 2597 2644 this->info.false_pgfault_cost += cost; … … 2651 2698 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) 2652 2699 uint32_t end_cycle = (uint32_t)hal_get_cycles(); 2653 uint32_t cost = end_cycle - start_cycle;2654 2700 #endif 2655 2701 … … 2660 
2706 #endif 2661 2707 2708 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2) 2709 if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) ) 2710 hal_vmm_display( XPTR( local_cxy , this->process ) , true ); 2711 #endif 2712 2662 2713 #if CONFIG_INSTRUMENTATION_PGFAULTS 2714 uint32_t cost = end_cycle - start_cycle; 2663 2715 this->info.global_pgfault_nr++; 2664 2716 this->info.global_pgfault_cost += cost; … … 2676 2728 #if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT) 2677 2729 uint32_t end_cycle = (uint32_t)hal_get_cycles(); 2678 uint32_t cost = end_cycle - start_cycle;2679 2730 #endif 2680 2731 … … 2686 2737 2687 2738 #if CONFIG_INSTRUMENTATION_PGFAULTS 2739 uint32_t cost = end_cycle - start_cycle; 2688 2740 this->info.false_pgfault_nr++; 2689 2741 this->info.false_pgfault_cost += cost; … … 2720 2772 #endif 2721 2773 2722 #if ( (DEBUG_VMM_HANDLE_COW & 3) == 3)2774 #if (DEBUG_VMM_HANDLE_COW & 2) 2723 2775 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2724 2776 #endif … … 2902 2954 #endif 2903 2955 2904 #if ( (DEBUG_VMM_HANDLE_COW & 3) == 3)2956 #if (DEBUG_VMM_HANDLE_COW & 2) 2905 2957 hal_vmm_display( XPTR( local_cxy , process ) , true ); 2906 2958 #endif -
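With this change the dirty bit travels from the GPT entry to vmm_ppn_release(), so a FILE page written through a mapping is marked for write-back when its vseg is unmapped. A sketch of one unmap step, assuming a hal_gpt_get_pte() accessor and a GPT_MAPPED attribute flag (only hal_gpt_reset_pte() and vmm_ppn_release() appear in this hunk; the other two names are assumptions):

    // read attributes and PPN for this vpn (assumed accessor)
    hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );

    if( attr & GPT_MAPPED )                 // assumed attribute flag
    {
        // unmap the PTE, then release the page,
        // forwarding the dirty attribute
        hal_gpt_reset_pte( gpt_xp , vpn );
        vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    }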
trunk/kernel/mm/vmm.h
r651 r656 312 312 313 313 /********************************************************************************************* 314 * This function removes from the VMM of a process descriptor identified by the <process>315 * argumentthe vseg identified by the <vseg> argument.316 * It is called by the vmm_user_reset(), vmm_global_delete_vseg() andvmm_destroy() functions.314 * This function removes from the VMM of a local process descriptor, identified by 315 * the <process> argument, the vseg identified by the <vseg> argument. 316 * It is called by the vmm_user_reset(), vmm_global_delete_vseg(), vmm_destroy() functions. 317 317 * It must be called by a local thread, running in the cluster containing the modified VMM. 318 318 * Use the RPC_VMM_REMOVE_VSEG if required. … … 324 324 * . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list. 325 325 * . for STACK the vseg is released to the local stack allocator. 326 * . for all other types, the vseg is released to the local kmem.326 * . for all other types, the vseg descriptor is released to the local kmem. 327 327 * Regarding the physical pages release: 328 328 * . for KERNEL and FILE, the pages are not released to kmem. 329 * . for CODE and STACK, the pages are released to local kmem when they are not COW.329 * . for CODE and STACK, the pages are released to local kmem. 330 330 * . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when 331 331 * the local cluster is the reference cluster. -
trunk/kernel/syscalls/sys_display.c
r640 r656 300 300 xptr_t mapper_xp; 301 301 mapper_t * mapper_ptr; 302 xptr_t page_xp; 302 303 303 304 char kbuf[CONFIG_VFS_MAX_PATH_LENGTH]; … … 315 316 __FUNCTION__ ); 316 317 #endif 317 this->errno = ENFILE; 318 return -1; 319 } 320 318 this->errno = EINVAL; 319 return -1; 320 } 321 322 // check nbytes 323 if( nbytes >= 4096 ) 324 { 325 326 #if DEBUG_SYSCALLS_ERROR 327 printk("\n[ERROR] in %s for MAPPER : nbytes cannot be larger than 4096\n", 328 __FUNCTION__ ); 329 #endif 330 this->errno = EINVAL; 331 return -1; 332 } 333 321 334 // copy pathname in kernel space 322 335 hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ), … … 366 379 mapper_xp = XPTR( inode_cxy , mapper_ptr ); 367 380 368 // display mapper369 error = mapper_display_page( mapper_xp , page_id , nbytes);370 371 if( error)381 // get extended pointer on target page 382 page_xp = mapper_remote_get_page( mapper_xp , page_id ); 383 384 if( page_xp == XPTR_NULL ) 372 385 { 373 386 374 387 #if DEBUG_SYSCALLS_ERROR 375 printk("\n[ERROR] in %s for MAPPER : cannot displaypage %d\n",388 printk("\n[ERROR] in %s for MAPPER : cannot get page %d\n", 376 389 __FUNCTION__ , page_id ); 377 390 #endif … … 379 392 return -1; 380 393 } 394 395 // display mapper 396 mapper_display_page( mapper_xp , page_xp , nbytes ); 397 381 398 382 399 break; … … 463 480 uint32_t page = (uint32_t)arg0; 464 481 465 fatfs_display_fat( page , entries );482 fatfs_display_fat( page , 0 , entries ); 466 483 } 467 484 -
trunk/kernel/syscalls/sys_read.c
r635 r656 63 63 cxy_t file_cxy; // remote file cluster identifier 64 64 uint32_t file_type; // file type 65 uint32_t file_attr; // file_attribute 65 uint32_t file_offset; // file offset 66 uint32_t file_attr; // file attributes 67 vfs_inode_t * inode_ptr; // local pointer on file inode 66 68 uint32_t nbytes; // number of bytes actually read 67 69 reg_t save_sr; // required to enable IRQs during syscall … … 129 131 file_cxy = GET_CXY( file_xp ); 130 132 131 // get file type and attributes 132 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 133 file_attr = hal_remote_l32( XPTR( file_cxy , &file_ptr->attr ) ); 133 // get inode, file type, offset and attributes 134 inode_ptr = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) ); 135 file_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) ); 136 file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) ); 137 file_attr = hal_remote_l32( XPTR( file_cxy , &file_ptr->attr ) ); 134 138 135 139 // enable IRQs 136 140 hal_enable_irq( &save_sr ); 137 141 138 // action depends on file type 142 // action depends on file type: 143 139 144 if( file_type == INODE_TYPE_FILE ) // read from file mapper 140 145 { … … 152 157 } 153 158 154 // move count bytes from mapper 159 // try to move count bytes from mapper 155 160 nbytes = vfs_user_move( true, // from mapper to buffer 156 161 file_xp, 157 162 vaddr, 158 163 count ); 159 if( nbytes != count ) 160 { 161 162 #if DEBUG_SYSCALLS_ERROR 163 printk("\n[ERROR] in %s : thread[%x,%x] cannot read %d bytes from file %d\n", 164 __FUNCTION__, process->pid, this->trdid, count, file_id ); 165 #endif 166 this->errno = EIO; 167 hal_restore_irq( save_sr ); 168 return -1; 169 } 170 164 } 171 165 else if( file_type == INODE_TYPE_DEV ) // read from TXT device … … 184 178 txt_owner_xp = hal_remote_l64( XPTR( chdev_cxy , &chdev_ptr->ext.txt.owner_xp ) ); 185 179 186 // checkTXT_RX ownership 180 // wait for TXT_RX ownership 187 181 if ( process_owner_xp != txt_owner_xp ) 188 182 { … … 202 196 } 203 197 204 // move count bytes fromdevice 198 // try to move count bytes from TXT device 205 199 nbytes = devfs_user_move( true, // from device to buffer 206 200 file_xp, 207 201 vaddr, 208 202 count ); 209 if( nbytes != count ) 210 { 211 212 #if DEBUG_SYSCALLS_ERROR 213 printk("\n[ERROR] in %s : thread[%x,%x] cannot read data from file %d\n", 214 __FUNCTION__, process->pid, this->trdid, file_id ); 215 #endif 216 this->errno = EIO; 217 hal_restore_irq( save_sr ); 218 return -1; 219 } 220 203 } 221 204 else // not FILE and not DEV … … 229 212 hal_restore_irq( save_sr ); 230 213 return -1; 214 } 215 216 // check error 217 if( nbytes == 0xFFFFFFFF ) 218 { 219 220 #if DEBUG_SYSCALLS_ERROR 221 printk("\n[ERROR] in %s : thread[%x,%x] cannot read data from file %d\n", 222 __FUNCTION__, process->pid, this->trdid, file_id ); 223 #endif 224 this->errno = EIO; 225 hal_restore_irq( save_sr ); 226 return -1; 227 } 231 228 232 229 -
trunk/kernel/syscalls/sys_write.c
r635 r656 62 62 cxy_t file_cxy; // remote file cluster identifier 63 63 uint32_t file_type; // file type 64 uint32_t file_offset; // currentfile offset 65 uint32_t file_attr; // file _attribute 64 uint32_t file_offset; // file offset 65 uint32_t file_attr; // file attributes 66 66 vfs_inode_t * inode_ptr; // local pointer on associated inode 67 67 uint32_t nbytes; // number of bytes actually written … … 138 138 hal_enable_irq( &save_sr ); 139 139 140 // action depends on file type 140 141 // action depends on file type: 141 142 if( file_type == INODE_TYPE_FILE ) // write to a file mapper 142 143 { … … 159 160 vaddr, 160 161 count ); 161 if ( nbytes != count ) 162 { 163 164 #if DEBUG_SYSCALLS_ERROR 165 printk("\n[ERROR] in %s : thread[%x,%x] cannot write %d bytes into file %d\n", 166 __FUNCTION__ , process->pid, this->trdid, count, file_id ); 167 #endif 168 hal_restore_irq( save_sr ); 169 this->errno = EIO; 170 return -1; 171 172 } 173 174 // update file size in inode descriptor 175 // only if (file_offset + count) > current_size 176 // note: the parent directory entry in mapper will 177 // be updated by the close syscall 178 xptr_t inode_xp = XPTR( file_cxy , inode_ptr ); 179 vfs_inode_update_size( inode_xp , file_offset + count ); 180 162 } 181 163 else if( file_type == INODE_TYPE_DEV ) // write to TXT device … … 186 168 vaddr, 187 169 count ); 188 if( nbytes != count ) 189 { 170 171 else // not FILE and not DEV 172 { 173 174 #if DEBUG_SYSCALLS_ERROR 175 printk("\n[ERROR] in %s : thread[%x,%x] / illegal inode type %s\n", 176 __FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( file_type ) ); 177 #endif 178 hal_restore_irq( save_sr ); 179 this->errno = EBADFD; 180 return -1; 181 } 182 183 // check error 184 if( nbytes == 0xFFFFFFFF ) 185 { 190 186 191 187 #if DEBUG_SYSCALLS_ERROR … … 193 189 __FUNCTION__ , process->pid, this->trdid, file_id ); 194 190 #endif 195 hal_restore_irq( save_sr ); 196 this->errno = EIO; 197 return -1; 198 } 199 } 200 else // not FILE and not DEV 201 { 202 203 #if DEBUG_SYSCALLS_ERROR 204 printk("\n[ERROR] in %s : thread[%x,%x] / illegal inode type %s\n", 205 __FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( file_type ) ); 206 #endif 207 191 hal_restore_irq( save_sr ); 208 this->errno = EBADFD; 209 192 this->errno = EIO; 193 return -1; 210 194 } 211 195
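Both syscalls now share one error convention: the user-move functions report failure with the out-of-band value 0xFFFFFFFF instead of a short byte count, so a single check covers the FILE and DEV paths. A condensed sketch of the write-path check (the boolean argument role is taken from the sys_read.c hunk above; false selects the buffer-to-mapper direction):

    // move count bytes from the user buffer to the file mapper
    nbytes = vfs_user_move( false,      // false : buffer -> mapper
                            file_xp,
                            vaddr,
                            count );

    // a short transfer is no longer treated as an error;
    // only the out-of-band value signals failure
    if( nbytes == 0xFFFFFFFF )
    {
        this->errno = EIO;
        return -1;
    }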