Changeset 656 for trunk/kernel


Timestamp: Dec 6, 2019, 12:07:51 PM (5 years ago)
Author: alain
Message:

Fix several bugs in the FATFS and in the VFS,
related to the creation of big files requiring
more than 4 Kbytes (one cluster) on device.

Location: trunk/kernel
Files: 22 edited

  • trunk/kernel/fs/fatfs.c

    r647 r656  
    850850
    851851}  // end fatfs_recursive_release()
     852
     853
     854//////////////////////////////////////////////////////////////////////////////////////////
      855// This static function accesses the FAT (File Allocation Table), stored in the FAT mapper,
      856// and returns in <searched_cluster_id> the FATFS cluster_id for a given page of a given
      857// inode, identified by the <searched_page_id> argument, that is the page index in the file
      858// (i.e. the page index in the file mapper). The entry point in the FAT is defined by the
      859// <first_cluster_id> argument, that is the cluster_id of an already allocated cluster.
      860// It can be the cluster_id of the first page of the file (always registered in the
      861// fatfs_inode extension), or of any page of the file whose <first_page_id> argument
      862// is smaller than the <searched_page_id> argument.
      863// This function can be called by a thread running in any cluster, as it uses remote
      864// access primitives when the FAT mapper is remote.
      865// The FAT mapper being a WRITE-THROUGH cache, this function updates the FAT mapper
      866// from information stored on the IOC device in case of miss when scanning the FAT mapper.
      867// The searched inode mapper being a WRITE-BACK cache, this function allocates a new
      868// cluster_id when the searched page exists in the inode mapper, and there is no FATFS
      869// cluster allocated yet for this page. It updates the FAT, but it does NOT copy the
      870// mapper page content to the File System.
     871//////////////////////////////////////////////////////////////////////////////////////////
     872// @ first_page_id       : [in]  index in file mapper for an existing page.
     873// @ first_cluster_id    : [in]  cluster_id for this existing page.
     874// @ searched_page_id    : [in]  index in file mapper for the searched page.
     875// @ searched_cluster_id : [out] cluster_id for the searched page.
     876// @ return 0 if success / return -1 if a FAT mapper miss cannot be solved,
     877//                         or if a missing cluster_id cannot be allocated.
     878//////////////////////////////////////////////////////////////////////////////////////////
     879static error_t fatfs_get_cluster( uint32_t   first_page_id,
     880                                  uint32_t   first_cluster_id,
     881                                  uint32_t   searched_page_id,
     882                                  uint32_t * searched_cluster_id )
     883{
     884    uint32_t   current_page_id;        // index of page in file mapper
     885    uint32_t   current_cluster_id;     // index of cluster in FATFS
     886    xptr_t     lock_xp;                // extended pointer on FAT lock
     887
     888assert( (searched_page_id > first_page_id) ,
     889"searched_page_id must be larger than first_page_id\n");
     890
     891#if DEBUG_FATFS_GET_CLUSTER
     892uint32_t   cycle = (uint32_t)hal_get_cycles();
     893thread_t * this  = CURRENT_THREAD;
     894if( DEBUG_FATFS_GET_CLUSTER < cycle )
     895printk("\n[%s] thread[%x,%x] enter / first_cluster_id %x / searched_page_id %d / cycle %d\n",
     896__FUNCTION__, this->process->pid, this->trdid, first_cluster_id, searched_page_id, cycle );
     897#endif
     898
     899    // get local pointer on VFS context (same in all clusters)
     900    vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS];
     901
     902    // get local pointer on local FATFS context
     903    fatfs_ctx_t * loc_fatfs_ctx = vfs_ctx->extend;
     904
     905    // get extended pointer and cluster on FAT mapper
     906    xptr_t fat_mapper_xp  = loc_fatfs_ctx->fat_mapper_xp;
     907    cxy_t  fat_cxy        = GET_CXY( fat_mapper_xp );
     908
     909    // get local pointer on FATFS context in FAT cluster
     910    fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) );
     911
     912    // build extended pointer on FAT lock in FAT cluster
     913    lock_xp = XPTR( fat_cxy , &fat_fatfs_ctx->lock );
     914
     915    // take FAT lock in read mode
     916    remote_rwlock_rd_acquire( lock_xp );
     917
     918    // initialize loop variables
     919    current_page_id    = first_page_id;
     920    current_cluster_id = first_cluster_id;
     921
     922    // scan FAT mapper (i.e. traverse FAT linked list)
     923    // starting from first_page_id until searched_page_id
     924    // each iteration in this loop can change both
     925    // the FAT page index and the slot index in FAT
     926    while( current_page_id < searched_page_id )
     927    {
     928        // FAT mapper page and slot indexes (1024 slots per FAT page)
     929        uint32_t fat_page_index   = current_cluster_id >> 10;
     930        uint32_t fat_slot_index   = current_cluster_id & 0x3FF;
     931
     932        // get pointer on current page descriptor in FAT mapper
     933        xptr_t current_page_xp = mapper_remote_get_page( fat_mapper_xp , fat_page_index );
     934
     935        if( current_page_xp == XPTR_NULL )
     936        {
     937            printk("\n[ERROR] in %s : cannot get page %d from FAT mapper\n",
     938            __FUNCTION__ , fat_page_index );
     939            remote_rwlock_rd_release( lock_xp );
     940            return -1;
     941        }
     942
     943        // get pointer on buffer containing the FAT mapper page
     944        xptr_t base_xp = ppm_page2base( current_page_xp );
     945        uint32_t * buffer = (uint32_t *)GET_PTR( base_xp );
     946
     947        // get next_cluster_id from FAT slot 
     948        uint32_t next_cluster_id = hal_remote_l32( XPTR( fat_cxy, &buffer[fat_slot_index] ) );
     949
     950        // allocate a new FAT cluster when there is no cluster
     951        // allocated on device for the current page
     952        if( next_cluster_id >= END_OF_CHAIN_CLUSTER_MIN )
     953        {
     954            // release the FAT lock in read mode,
     955            remote_rwlock_rd_release( lock_xp );
     956
     957            // allocate a new cluster_id (and update both FAT mapper and FAT on device).
     958            error_t error = fatfs_cluster_alloc( &next_cluster_id );
     959
     960            if( error )
     961            {
     962                printk("\n[ERROR] in %s : cannot allocate cluster on FAT32 for page %d\n",
     963                __FUNCTION__ , current_page_id );
      964                // note : the FAT lock was already released before fatfs_cluster_alloc()
     965                return -1;
     966            }
     967
     968#if (DEBUG_FATFS_GET_CLUSTER & 1)
     969if( DEBUG_FATFS_GET_CLUSTER < cycle )
     970printk("\n[%s] allocated a new cluster_id %d in FATFS\n",
     971__FUNCTION__, next_cluster_id );
     972#endif
     973            // take the FAT lock in read mode,
     974            remote_rwlock_rd_acquire( lock_xp );
     975        }
     976
     977#if (DEBUG_FATFS_GET_CLUSTER & 1)
     978if( DEBUG_FATFS_GET_CLUSTER < cycle )
     979printk("\n[%s] traverse FAT / fat_page_index %d / fat_slot_index %d / next_cluster_id %x\n",
     980__FUNCTION__, fat_page_index, fat_slot_index , next_cluster_id );
     981#endif
     982
     983        // update loop variables
     984        current_cluster_id = next_cluster_id;
     985        current_page_id++;
     986    }
     987   
     988    // release FAT lock
     989    remote_rwlock_rd_release( lock_xp );
     990
     991#if DEBUG_FATFS_GET_CLUSTER
     992if( DEBUG_FATFS_GET_CLUSTER < cycle )
     993printk("\n[%s] thread[%x,%x] exit / searched_cluster_id = %d\n",
     994__FUNCTION__, this->process->pid, this->trdid, current_cluster_id );
     995#endif
     996
     997    *searched_cluster_id = current_cluster_id;
     998    return 0;
     999
     1000}  // end fatfs_get_cluster()
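
To illustrate the traversal implemented above, here is a minimal stand-alone sketch (toy FAT content, plain user-space C, not kernel code): it follows the per-file linked list of cluster_id slots exactly as the loop above does, but on a plain array instead of the FAT mapper, and it only reports a missing cluster instead of allocating one.

    #include <stdint.h>
    #include <stdio.h>

    #define END_OF_CHAIN  0x0FFFFFF8   /* any value >= this marks the end of a chain (FAT32 convention) */

    /* toy FAT : slot [c] contains the cluster_id of the next page of the same file.  */
    /* Here a file starts at cluster_id 5 and occupies clusters 5 -> 9 -> 12.         */
    static uint32_t fat[16] =
    {
        [5]  = 9,
        [9]  = 12,
        [12] = END_OF_CHAIN,
    };

    /* return in <cluster_id> the cluster of page <page_id>,                           */
    /* starting from <first_cluster_id> (the cluster of page 0)                        */
    static int get_cluster( uint32_t first_cluster_id, uint32_t page_id, uint32_t * cluster_id )
    {
        uint32_t current = first_cluster_id;
        for( uint32_t p = 0 ; p < page_id ; p++ )
        {
            uint32_t next = fat[current];
            if( next >= END_OF_CHAIN ) return -1;   /* missing cluster : the kernel code allocates one here */
            current = next;
        }
        *cluster_id = current;
        return 0;
    }

    int main( void )
    {
        uint32_t cid;
        if( get_cluster( 5 , 2 , &cid ) == 0 )
            printf("page 2 -> cluster_id %u\n", (unsigned)cid);   /* prints 12 */
        return 0;
    }
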
     1001
     1002
     1003
    8521004
    8531005
     
    9041056//////////////////////////////////////////
    9051057void fatfs_display_fat( uint32_t  page_id,
    906                         uint32_t  nentries )
     1058                        uint32_t  min_slot,
     1059                        uint32_t  nb_slots )
    9071060{
    9081061    uint32_t line;
    909     uint32_t maxline;
    9101062
    9111063    // compute number of lines to display
    912     maxline = nentries >> 3;
    913     if( nentries & 0x7 ) maxline++;
     1064    uint32_t min_line = min_slot >> 3;
     1065    uint32_t max_line = (min_slot + nb_slots - 1) >> 3;
    9141066
    9151067    // get pointer on local FATFS context
     
    9171069    fatfs_ctx_t * loc_fatfs_ctx = (fatfs_ctx_t *)vfs_ctx->extend;
    9181070
    919     // get extended pointer on FAT mapper
    920     xptr_t fat_mapper_xp  = loc_fatfs_ctx->fat_mapper_xp;
    921 
    922     // get FAT cluster identifier
    923     cxy_t  fat_cxy = GET_CXY( fat_mapper_xp );
     1071    // get pointers on FAT mapper (in FAT cluster)
     1072    xptr_t     mapper_xp  = loc_fatfs_ctx->fat_mapper_xp;
     1073    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    9241074
    9251075    // get pointer on FATFS context in FAT cluster
    926     fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) );
     1076    fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( mapper_cxy , &vfs_ctx->extend ) );
    9271077 
    9281078    // get current value of hint and free_clusters
    929     uint32_t hint = hal_remote_l32( XPTR( fat_cxy , &fat_fatfs_ctx->free_cluster_hint ) );
    930     uint32_t free = hal_remote_l32( XPTR( fat_cxy , &fat_fatfs_ctx->free_clusters ) );
    931  
    932     // get extended pointer on requested page in FAT mapper
    933     xptr_t     page_xp  = mapper_remote_get_page( fat_mapper_xp , page_id );
    934 
    935     // get extended pointer on requested page base
     1079    uint32_t hint = hal_remote_l32( XPTR( mapper_cxy , &fat_fatfs_ctx->free_cluster_hint ) );
     1080    uint32_t free = hal_remote_l32( XPTR( mapper_cxy , &fat_fatfs_ctx->free_clusters ) );
     1081
     1082    // get extended pointer on requested page descriptor in FAT mapper
     1083    xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );
     1084
     1085    // get pointers on requested page base
    9361086    xptr_t     base_xp  = ppm_page2base( page_xp );
    9371087    void     * base     = GET_PTR( base_xp );
    9381088
    9391089    printk("\n***** FAT mapper / cxy %x / page_id %d / base %x / free_clusters %x / hint %x\n",
    940     fat_cxy, page_id, base, free, hint );
    941 
    942     for( line = 0 ; line < maxline ; line++ )
    943     {
    944         printk("%x : %X | %X | %X | %X | %X | %X | %X | %X\n", (line<<3),
     1090    mapper_cxy, page_id, base, free, hint );
     1091
     1092    for( line = min_line ; line <= max_line ; line++ )
     1093    {
     1094        printk("%d : %X | %X | %X | %X | %X | %X | %X | %X\n", (line<<3),
    9451095        hal_remote_l32( base_xp + ((line<<5)      ) ),
    9461096        hal_remote_l32( base_xp + ((line<<5) + 4  ) ),
     
    9541104
    9551105}  // end fatfs_display_fat()
    956 
    957 ///////////////////////////////////////////////////////
    958 error_t fatfs_get_cluster( uint32_t   first_cluster_id,
    959                            uint32_t   searched_page_index,
    960                            uint32_t * searched_cluster_id )
    961 {
    962     xptr_t     current_page_xp;        // pointer on current page descriptor
    963     uint32_t * buffer;                 // pointer on current page (array of uint32_t)
    964     uint32_t   current_page_index;     // index of current page in FAT
    965     uint32_t   current_slot_index;     // index of slot in current page
    966     uint32_t   page_count_in_file;     // index of page in file (index in linked list)
    967     uint32_t   next_cluster_id;        // content of current FAT slot
    968     xptr_t     lock_xp;                // extended pointer on FAT lock
    969 
    970 assert( (searched_page_index > 0) ,
    971 "no FAT access required for first page\n");
    972 
    973 #if DEBUG_FATFS_GET_CLUSTER
    974 uint32_t   cycle = (uint32_t)hal_get_cycles();
    975 thread_t * this  = CURRENT_THREAD;
    976 if( DEBUG_FATFS_GET_CLUSTER < cycle )
    977 printk("\n[%s] thread[%x,%x] enter / first_cluster_id %d / searched_index %d / cycle %d\n",
    978 __FUNCTION__, this->process->pid, this->trdid, first_cluster_id, searched_page_index, cycle );
    979 #endif
    980 
    981     // get local pointer on VFS context (same in all clusters)
    982     vfs_ctx_t * vfs_ctx = &fs_context[FS_TYPE_FATFS];
    983 
    984     // get local pointer on local FATFS context
    985     fatfs_ctx_t * loc_fatfs_ctx = vfs_ctx->extend;
    986 
    987     // get extended pointer and cluster on FAT mapper
    988     xptr_t fat_mapper_xp  = loc_fatfs_ctx->fat_mapper_xp;
    989     cxy_t  fat_cxy        = GET_CXY( fat_mapper_xp );
    990 
    991     // get local pointer on FATFS context in FAT cluster
    992     fatfs_ctx_t * fat_fatfs_ctx = hal_remote_lpt( XPTR( fat_cxy , &vfs_ctx->extend ) );
    993 
    994     // build extended pointer on FAT lock in FAT cluster
    995     lock_xp = XPTR( fat_cxy , &fat_fatfs_ctx->lock );
    996 
    997     // take FAT lock in read mode
    998     remote_rwlock_rd_acquire( lock_xp );
    999 
    1000     // initialize loop variable (1024 slots per page)
    1001     current_page_index  = first_cluster_id >> 10;
    1002     current_slot_index  = first_cluster_id & 0x3FF;
    1003     page_count_in_file  = 0;
    1004     next_cluster_id     = 0xFFFFFFFF;
    1005 
    1006     // scan FAT mapper (i.e. traverse FAT linked list)
    1007     while( page_count_in_file < searched_page_index )
    1008     {
    1009         // get pointer on current page descriptor in FAT mapper
    1010         current_page_xp = mapper_remote_get_page( fat_mapper_xp , current_page_index );
    1011 
    1012         if( current_page_xp == XPTR_NULL )
    1013         {
    1014             printk("\n[ERROR] in %s : cannot get next page from FAT mapper\n", __FUNCTION__);
    1015             remote_rwlock_rd_release( lock_xp );
    1016             return -1;
    1017         }
    1018 
    1019         // get pointer on buffer for current page
    1020         xptr_t base_xp = ppm_page2base( current_page_xp );
    1021         buffer = (uint32_t *)GET_PTR( base_xp );
    1022 
    1023         // get FAT slot content
    1024         next_cluster_id = hal_remote_l32( XPTR( fat_cxy, &buffer[current_slot_index] ) );
    1025 
    1026 #if (DEBUG_FATFS_GET_CLUSTER & 1)
    1027 if( DEBUG_FATFS_GET_CLUSTER < cycle )
    1028 printk("\n[%s] traverse FAT / current_page_index = %d\n"
    1029 "current_slot_index = %d / next_cluster_id = %d\n",
    1030 __FUNCTION__, current_page_index, current_slot_index , next_cluster_id );
    1031 #endif
    1032         // update loop variables
    1033         current_page_index = next_cluster_id >> 10;
    1034         current_slot_index = next_cluster_id & 0x3FF;
    1035         page_count_in_file++;
    1036     }
    1037 
    1038     if( next_cluster_id == 0xFFFFFFFF )
    1039     {
    1040         printk("\n[ERROR] in %s : searched_cluster_id not found in FAT\n", __FUNCTION__ );
    1041         remote_rwlock_rd_release( lock_xp );
    1042         return -1;
    1043     }
    1044    
    1045     // release FAT lock
    1046     remote_rwlock_rd_release( lock_xp );
    1047 
    1048 #if DEBUG_FATFS_GET_CLUSTER
    1049 cycle = (uint32_t)hal_get_cycles();
    1050 if( DEBUG_FATFS_GET_CLUSTER < cycle )
    1051 printk("\n[%s] thread[%x,%x] exit / searched_cluster_id = %d / cycle %d\n",
    1052 __FUNCTION__, this->process->pid, this->trdid, next_cluster_id / cycle );
    1053 #endif
    1054 
    1055     *searched_cluster_id = next_cluster_id;
    1056     return 0;
    1057 
    1058 }  // end fatfs_get_cluster()
    10591106
    10601107
     
    16911738// by the <mapper> argument, to find the directory entry identified by the <name> argument,
    16921739// and return a pointer on the directory entry, described as and array of 32 bytes, and the
    1693 // incex of this entry in the FAT32 mapper, seen as an array of 32 bytes entries.
     1740// index of this entry in the FAT32 mapper, seen as an array of 32 bytes entries.
    16941741// It is called by the fatfs_new_dentry() and fatfs_update_dentry() functions.
    16951742// It must be called by a thread running in the cluster containing the mapper.
     
    17011748// @ return 0 if found / return 1 if not found / return -1 if mapper access error.
    17021749//////////////////////////////////////////////////////////////////////////////////////////////
    1703 error_t fatfs_scan_directory( mapper_t *  mapper,
    1704                               char     *  name,
    1705                               uint8_t  ** entry,
    1706                               uint32_t *  index )
     1750static error_t fatfs_scan_directory( mapper_t *  mapper,
     1751                                     char     *  name,
     1752                                     uint8_t  ** entry,
     1753                                     uint32_t *  index )
    17071754{
    17081755    // Two embedded loops to scan the directory mapper:
     
    17251772#endif
    17261773
    1727     char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracted from each directory entry
     1774    char       cname[CONFIG_VFS_MAX_NAME_LENGTH];  // name extracted from directory entry
    17281775
    17291776    char       lfn1[16];         // buffer for one partial cname
     
    17611808#if (DEBUG_FATFS_SCAN_DIRECTORY & 0x1)
    17621809if( DEBUG_FATFS_SCAN_DIRECTORY < cycle )
    1763 mapper_display_page( mapper_xp , page_id , 256 );
     1810mapper_display_page( mapper_xp , page_xp , 256 );
    17641811#endif
    17651812        // scan this page until end of directory, end of page, or name found
     
    18831930    error_t        error;
    18841931
    1885     char           dir_name[CONFIG_VFS_MAX_NAME_LENGTH];
     1932    char           parent_name[CONFIG_VFS_MAX_NAME_LENGTH];
    18861933
    18871934// check arguments
     
    19001947assert( (xlist_is_empty( root_xp ) == false ), "child inode must have one parent\n");
    19011948
    1902 #if DEBUG_FATFS_GET_DENTRY
     1949#if DEBUG_FATFS_NEW_DENTRY
    19031950uint32_t   cycle = (uint32_t)hal_get_cycles();
    19041951thread_t * this  = CURRENT_THREAD;
    1905 vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name );
    1906 if( DEBUG_FATFS_GET_DENTRY < cycle )
     1952vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
     1953if( DEBUG_FATFS_NEW_DENTRY < cycle )
    19071954printk("\n[%s]  thread[%x,%x] enter for child <%s> in parent <%s> / cycle %d\n",
    1908 __FUNCTION__, this->process->pid, this->trdid, name , dir_name , cycle );
     1955__FUNCTION__, this->process->pid, this->trdid, name , parent_name , cycle );
    19091956#endif
    19101957
     
    19161963
    19171964    // return non fatal error if not found
    1918     if( error ) return -1;
     1965    if( error )
     1966    {
     1967        vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
     1968        printk("\n[ERROR] in %s : cannot find <%s> entry in <%s> directory mapper\n",
      1969        __FUNCTION__, name , parent_name );
     1970        return -1;
     1971    }
     1972 
    19191973
    19201974    // get relevant infos from FAT32 directory entry
     
    19462000    if( found == false )
    19472001    {
    1948         vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , dir_name );
     2002        vfs_inode_get_name( XPTR( local_cxy , parent_inode ) , parent_name );
    19492003        printk("\n[ERROR] in %s : cannot find <%s> directory in list of parents for <%s>\n",
    1950         __FUNCTION__, dir_name, name );
     2004        __FUNCTION__, parent_name, name );
    19512005        return -1;
    19522006    }
     
    19622016    dentry_ptr->extend = (void *)(intptr_t)index;
    19632017
    1964 #if DEBUG_FATFS_GET_DENTRY
     2018#if DEBUG_FATFS_NEW_DENTRY
    19652019cycle = (uint32_t)hal_get_cycles();
    1966 if( DEBUG_FATFS_GET_DENTRY < cycle )
    1967 printk("\n[%s]  thread[%x,%x] exit / intialised inode & dentry for <%s> in <%s> / cycle %d\n",
    1968 __FUNCTION__, this->process->pid, this->trdid, name, dir_name, cycle );
     2020if( DEBUG_FATFS_NEW_DENTRY < cycle )
     2021printk("\n[%s]  thread[%x,%x] exit for <%s> in <%s> / cluster_id %x / size %d  / cycle %d\n",
     2022__FUNCTION__, this->process->pid, this->trdid, name, parent_name, cluster, size,  cycle );
     2023#endif
     2024
     2025
     2026#if (DEBUG_FATFS_NEW_DENTRY & 1)
     2027if( DEBUG_FATFS_NEW_DENTRY < cycle )
     2028{
     2029    fatfs_display_fat( 0 , 0 , 64 );
     2030    fatfs_display_fat( cluster >> 10 ,  (cluster & 0x3FF) , 32 );
     2031}
    19692032#endif
    19702033
     
    19882051assert( (inode  != NULL) , "inode is NULL\n" );
    19892052assert( (dentry != NULL) , "dentry is NULL\n" );
    1990 assert( (size   != 0   ) , "size is 0\n" );
    19912053
    19922054#if DEBUG_FATFS_UPDATE_DENTRY
     
    20132075    }
    20142076
    2015     // set size in FAT32 directory entry
    2016     fatfs_set_record( DIR_FILE_SIZE , entry , size );
    2017 
    2018     // get local pointer on modified page base
    2019     void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));
    2020 
    2021     // get extended pointer on modified page descriptor
    2022     xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) );
    2023 
    2024     // synchronously update the modified page on device
    2025     error = fatfs_move_page( page_xp , IOC_SYNC_WRITE );
    2026 
    2027     if( error )
    2028     {
    2029         vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
    2030         printk("\n[ERROR] in %s : cannot update parent directory <%s> on device\n",
    2031         __FUNCTION__, dir_name );
    2032         return -1;
     2077    // get current size value
     2078    uint32_t current_size = fatfs_get_record( DIR_FILE_SIZE , entry );
     2079
     2080    // update dentry in mapper & device only if required
     2081    if( size != current_size )
     2082    {
     2083        // set size field in FAT32 directory entry
     2084        fatfs_set_record( DIR_FILE_SIZE , entry , size );
     2085
     2086        // get pointer on modified page base
     2087        void * base = (void *)((intptr_t)entry & (~CONFIG_PPM_PAGE_MASK));
     2088
     2089        // get extended pointer on modified page descriptor
     2090        xptr_t page_xp = ppm_base2page( XPTR( local_cxy , base ) );
     2091
     2092        // synchronously update the modified page on device
     2093        error = fatfs_move_page( page_xp , IOC_SYNC_WRITE );
     2094
     2095        if( error )
     2096        {
     2097            vfs_inode_get_name( XPTR( local_cxy , inode ) , dir_name );
     2098            printk("\n[ERROR] in %s : cannot update parent directory <%s> on device\n",
     2099            __FUNCTION__, dir_name );
     2100            return -1;
     2101        }
    20332102    }
    20342103
     
    25862655    return 0;
    25872656
    2588 }  // end fat_cluster_alloc()
     2657}  // end fatfs_cluster_alloc()
    25892658
    25902659//////////////////////////////////////////////
     
    27372806#endif
    27382807
    2739     // get page cluster an local pointer
     2808    // get page cluster and local pointer
    27402809    cxy_t    page_cxy = GET_CXY( page_xp );
    27412810    page_t * page_ptr = GET_PTR( page_xp );
     
    27542823    inode_ptr  = hal_remote_lpt( XPTR( page_cxy , &mapper_ptr->inode ) );
    27552824
     2825    //////////////////////////////  FAT mapper  /////////////////////////////////////////
     2826    if( inode_ptr == NULL )
     2827    {
     2828
    27562829#if DEBUG_FATFS_MOVE_PAGE
    27572830if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2758 printk("\n[%s] thread[%x,%x] enters : %s / cxy %x / mapper %x / inode %x / page %x\n",
    2759 __FUNCTION__, this->process->pid, this->trdid,
    2760 dev_ioc_cmd_str( cmd_type ), page_cxy, mapper_ptr, inode_ptr, GET_PTR(buffer_xp) );
    2761 #endif
    2762 
    2763     //////////////////////////////  FAT mapper
    2764     if( inode_ptr == NULL )
    2765     {
     2831printk("\n[%s] thread[%x,%x] enters for %s /  page %d in FAT mapper / cycle %d\n",
     2832__FUNCTION__, this->process->pid, this->trdid, dev_ioc_cmd_str(cmd_type), page_id, cycle );
     2833#endif
    27662834        // get lba from FATFS context and page_id
    27672835        uint32_t      lba = fatfs_ctx->fat_begin_lba + (page_id << 3);
     
    27782846#if DEBUG_FATFS_MOVE_PAGE
    27792847if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2780 {
    2781     if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) )
    2782         printk("\n[%s] thread[%x,%x] load FAT mapper page %d from IOC / cycle %d\n",
    2783         __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
    2784     else
    2785         printk("\n[%s] thread[%x,%x] sync FAT mapper page %d to IOC / cycle %d\n",
    2786         __FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
    2787 }
    2788 #endif
    2789 
    2790     }
    2791     /////////////////////////  inode mapper
      2848printk("\n[%s] thread[%x,%x] exit / page %d in FAT mapper / cycle %d\n",
     2849__FUNCTION__, this->process->pid, this->trdid, page_id, cycle );
     2850#endif
     2851
     2852    }
     2853    /////////////////////////  inode mapper  ////////////////////////////////////////////
    27922854    else                       
    27932855    {
     
    27952857#if DEBUG_FATFS_MOVE_PAGE
    27962858vfs_inode_get_name( XPTR( page_cxy , inode_ptr ) , name );
    2797 #endif
    2798 
    2799         uint32_t  searched_cluster;
    2800         uint32_t  first_cluster;
    2801 
    2802         // get first_cluster from inode extension
    2803         void * extend = hal_remote_lpt( XPTR( page_cxy , &inode_ptr->extend ) );
    2804         first_cluster = (uint32_t)(intptr_t)extend;
    2805 
    2806         // compute searched_cluster
     2859if( DEBUG_FATFS_MOVE_PAGE < cycle )
     2860printk("\n[%s] thread[%x,%x] enters for %s / page %d in <%s> mapper/ cycle %d\n",
     2861__FUNCTION__, this->process->pid, this->trdid,
     2862dev_ioc_cmd_str( cmd_type ), page_id, name, cycle );
     2863#endif
     2864
     2865        uint32_t  searched_cluster_id;
     2866        uint32_t  first_cluster_id;
     2867
     2868        // get first_cluster_id from inode extension
     2869        void * extend    = hal_remote_lpt( XPTR( page_cxy , &inode_ptr->extend ) );
     2870        first_cluster_id = (uint32_t)(intptr_t)extend;
     2871
     2872        // compute searched_cluster_id
    28072873        if( page_id == 0 )            // no need to access FAT mapper
    28082874        {
    28092875            // searched cluster is first cluster
    2810             searched_cluster = first_cluster;
     2876            searched_cluster_id = first_cluster_id;
    28112877        }
    28122878        else                        // FAT mapper access required
    28132879        {
    2814             // access FAT mapper to get searched cluster
    2815             error = fatfs_get_cluster( first_cluster,
     2880            // scan FAT mapper to get searched_cluster_id
     2881            error = fatfs_get_cluster( 0,                    // first page in mapper
     2882                                       first_cluster_id,
    28162883                                       page_id,
    2817                                        &searched_cluster );
     2884                                       &searched_cluster_id );
    28182885            if( error )
    28192886            {
    2820                 printk("\n[ERROR] in %s : cannot access FAT mapper\n", __FUNCTION__ );
     2887                printk("\n[ERROR] in %s : cannot get cluster_id\n", __FUNCTION__ );
    28212888                return -1;
    28222889            }
     
    28242891
    28252892        // get lba for searched_cluster
    2826         uint32_t lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster );
    2827 
    2828         // access IOC device
     2893        uint32_t lba = fatfs_lba_from_cluster( fatfs_ctx , searched_cluster_id );
     2894
     2895        // access IOC device to move 8 blocks
    28292896        error = dev_ioc_move_data( cmd_type , buffer_xp , lba , 8 );
    28302897
     
    28372904#if DEBUG_FATFS_MOVE_PAGE
    28382905if( DEBUG_FATFS_MOVE_PAGE < cycle )
    2839 {
    2840     if ( (cmd_type == IOC_READ) || (cmd_type == IOC_SYNC_READ) )
    2841     printk("\n[%s] thread[%x,%x] load page %d of <%s> / cluster_id %x / cycle %d\n",
    2842     __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster, cycle );
    2843     else
    2844     printk("\n[%s] thread[%x,%x] sync page %d of <%s> / cluster_id %x / cycle %d\n",
    2845     __FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster, cycle );
    2846 }
     2906vfs_inode_get_name( XPTR( page_cxy, inode_ptr ) , name );
     2907printk("\n[%s] thread[%x,%x] exit / page %d in <%s> mapper / cluster_id %x\n",
     2908__FUNCTION__, this->process->pid, this->trdid, page_id, name, searched_cluster_id );
    28472909#endif
    28482910
  • trunk/kernel/fs/fatfs.h

    r638 r656  
    3232
    3333
    34 /**************************************************************************************
     34/******************************************************************************************
    3535 * The FATFS File System implements a FAT32 read/write file system.
    3636 *
     
    4343 *    on the FAT mapper.
    4444 * 2) The vfs_inode_t "extend" contains, for each inode,
    45  *    the first FAT cluster index (after cast to intptr).
     45 *    the first FAT32 cluster_id (after cast to intptr).
    4646 * 3) The vfs_dentry_t "extend" field contains, for each dentry, the entry index
    47  *    in the FATFS directory (32 bytes per FATFS entry).
    48  *************************************************************************************/
     47 *    in the FATFS directory (32 bytes per FATFS directory entry).
     48 *
      49 * In the FAT32 File System, the File Allocation Table is actually an array
     50 * of uint32_t slots. Each slot in this array contains the index (called cluster_id)
     51 * of another slot in this array, to form one linked list for each file stored on
     52 * device in the FAT32 File System. This index in the FAT array is also the index of
     53 * the FATFS cluster on the device. One FATFS cluster is supposed to contain one PPM page.
     54 * For a given file, the entry point in the FAT is the cluster_id of the FATFS cluster
     55 * containing the first page of the file, but it can be any cluster_id already allocated
     56 * to the file.
     57 *****************************************************************************************/
    4958 
    5059///////////////////////////////////////////////////////////////////////////////////////////
     
    213222
    214223/*****************************************************************************************
    215  * This function access the FAT (File Allocation Table), stored in the FAT mapper, and
    216  * returns in <searched_cluster> the FATFS cluster index for a given page of a given
    217  * inode identified by the <first_cluster> and <page_id> arguments.
    218  * It can be called by a thread running in any cluster, as it uses remote access
    219  * primitives when the FAT mapper is remote.
    220  * The FAT is actually an array of uint32_t slots. Each slot in this array contains the
    221  * index of another slot in this array, to form one linked list for each file stored on
    222  * device in the FATFS file system. This index in the FAT array is also the index of the
    223  * FATFS cluster on the device. One FATFS cluster is supposed to contain one PPM page.
    224  * For a given file, the entry point in the FAT is simply the index of the FATFS cluster
    225  * containing the first page of the file. The FAT mapper being a cache, this function
    226  * updates the FAT mapper from informations stored on IOC device in case of miss.
    227  *****************************************************************************************
    228  * @ first_cluster       : [in]  index of first FATFS cluster allocated to the file.
    229  * @ page_id             : [in]  index of searched page in file.
    230  * @ searched_cluster    : [out] found FATFS cluster index.
    231  * @ return 0 if success / return -1 if a FAT mapper miss cannot be solved.
    232  ****************************************************************************************/
    233 error_t fatfs_get_cluster( uint32_t   first_cluster,
    234                            uint32_t   page_id,
    235                            uint32_t * searched_cluster );
    236 
    237 /*****************************************************************************************
    238  * This function display the content of the FATFS context copy in cluster identified
    239  * by the <cxy> argument.
      224 * This debug function displays the content of the FATFS context copy in the cluster
     225 * identified by the <cxy> argument.
    240226 * This function can be called by a thread running in any cluster.
    241227 *****************************************************************************************
     
    245231
    246232/*****************************************************************************************
    247  * This function access the FAT mapper to display one page of the File Allocation Table.
    248  * It loads the requested page fom IOC device to FAT mapper if required.
      233 * This debug function accesses the FAT mapper to display the current FAT state,
      234 * as defined by the <page_id>, <min_slot>, and <nb_slots> arguments.
      235 * It loads the missing pages from the IOC device into the mapper if required.
    249236 * This function can be called by a thread running in any cluster.
    250237 *****************************************************************************************
    251  * @ page_id     : page index in FAT mapper (one page is 4 Kbytes).
    252  * @ nb_entries  : number of entries (one entry is 4 bytes).
     238 * @ page_id   : page index in FAT mapper (one page is 4 Kbytes = 1024 slots).
     239 * @ min_slot  : first slot in page
     240 * @ nb_slots  : number of slots (one slot is 4 bytes).
    253241 ****************************************************************************************/
    254242void fatfs_display_fat( uint32_t  page_id,
    255                         uint32_t  nb_entries );
     243                        uint32_t  min_slot,
     244                        uint32_t  nb_slots );
    256245
    257246
     
    330319 *****************************************************************************************
    331320 * It scan a parent directory mapper, identified by the <parent_inode> argument to find
    332  * a directory entry identified by the <name> argument.  In case of success, it
    333  * initializes the inode/dentry couple, identified by the  <child_inode_xp> argument
    334  * in the Inode Tree. The child inode descriptor, and the associated dentry descriptor
    335  * must have been previously allocated by the caller.
      321 * a directory entry identified by the <name> argument. In case of success, it completes
      322 * the initialization of the inode/dentry couple identified by the <child_inode_xp> argument.
     323 * The child inode descriptor, and the associated dentry descriptor must have been
     324 * previously allocated by the caller.
    336325 * - It set the "type", "size", and "extend" fields in the child inode descriptor.
    337326 * - It set the " extend" field in the dentry descriptor.
     
    421410 * TODO : the current implementation check ALL pages in the FAT region, even if most
    422411 * pages are empty, and not copied in mapper. It is sub-optimal.
    423  * - A first solution is to maintain in the FAT context two "dirty_min" and "dirty_max"
    424  *  variables defining the smallest/largest dirty page index in FAT mapper...
     412 * A solution is to maintain in the FAT context two "dirty_min" and "dirty_max"
     413 * variables defining the smallest/largest dirty page index in FAT mapper...
    425414 *****************************************************************************************
    426415 * @ return 0 if success / return -1 if failure during IOC device access.
     
    448437 * in <searched_cluster> the FATFS cluster index of a free cluster.
    449438 * It can be called by a thread running in any cluster, as it uses remote access
    450  * primitives when the FAT mapper is remote. It takes the queuelock stored in the FATFS
     439 * primitives when the FAT mapper is remote. It takes the rwlock stored in the FATFS
    451440 * context located in the same cluster as the FAT mapper itself, to get exclusive
    452441 * access to the FAT. It uses and updates the <free_cluster_hint> and <free_clusters>
     
    457446 * - it returns the allocated cluster index.
    458447 *****************************************************************************************
    459  * @ searched_cluster    : [out] found FATFS cluster index.
     448 * @ searched_cluster_id  : [out] allocated FATFS cluster index.
    460449 * @ return 0 if success / return -1 if no more free clusters on IOC device.
    461450 ****************************************************************************************/
    462 error_t fatfs_cluster_alloc( uint32_t * searched_cluster );
     451error_t fatfs_cluster_alloc( uint32_t * searched_cluster_id );
    463452
    464453/*****************************************************************************************
    465454 * This function implements the generic vfs_fs_release_inode() function for the FATFS.
    466   *****************************************************************************************
     455 *****************************************************************************************
      456 * This function is used to remove a given file or directory from the FATFS file system.
    467457 * It releases all clusters allocated to a file/directory identified by the <inode_xp>
    468458 * argument. All released clusters are marked FREE_CLUSTER in the FAT mapper.
     
    470460 * the clusters in reverse order of the linked list (from last to first).
    471461 * When the FAT mapper has been updated, it calls the fatfs_sync_fat() function to
    472  * synchronously update all dirty pages in the FAT mapper to the IOC device.
     462 * synchronously update all modified pages in the FAT mapper to the IOC device.
    473463 * Finally the FS-INFO sector on the IOC device is updated.
    474464 *****************************************************************************************
     
    485475 * The pointer on the mapper and the page index in file are found in the page descriptor.
    486476 * It is used for both a regular file/directory mapper, and the FAT mapper.
    487  * - For the FAT mapper, it updates the FAT region on IOC device.
    488  * - For a regular file, it access the FAT mapper to get the cluster index on IOC device.
      477 * - For the FAT mapper, it reads/writes the FAT region on the IOC device.
      478 * - For a regular file, it scans the FAT mapper to get the cluster_id on the IOC device,
      479 *   and reads/writes this cluster.
    489480 * It can be called by any thread running in any cluster.
    490481 *
    491482 * WARNING : For the FAT mapper, the inode field in the mapper MUST be NULL, as this
    492  * is used to indicate that the corresponding mapper is the FAT mapper.
     483 *           is used to indicate that the corresponding mapper is the FAT mapper.
     484 *
     485 * TODO : In this first implementation, the entry point in the FAT to get the cluster_id
     486 *        is always the cluster_id of the first page, registered in the inode extension.
      487 *        This can introduce a quadratic cost when accessing all pages of a
     488 *        big file. An optimisation would be to introduce in the inode extension two
     489 *        new fields <other_page_id> & <other_cluster_id>, defining a second entry point
     490 *        in the FAT.
    493491 *****************************************************************************************
    494492 * @ page_xp   : extended pointer on page descriptor.
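
As a rough illustration of the optimisation suggested in the TODO above, here is a sketch of how the hypothetical <other_page_id> / <other_cluster_id> fields named there could be used to pick a better FAT entry point; these fields and this helper are assumptions, not code from this changeset.

    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical extension of the fatfs inode "extend", as named in the TODO          */
    typedef struct
    {
        uint32_t first_cluster_id;   /* cluster_id of page 0 (existing information)       */
        uint32_t other_page_id;      /* hypothetical : page index of a second entry point */
        uint32_t other_cluster_id;   /* hypothetical : cluster_id of that page            */
    }
    fatfs_inode_ext_t;

    /* choose the FAT entry point for <searched_page_id> : prefer the cached point        */
    /* when it exists and is not beyond the searched page                                 */
    static void get_entry_point( fatfs_inode_ext_t * ext, uint32_t searched_page_id,
                                 uint32_t * entry_page_id, uint32_t * entry_cluster_id )
    {
        if( ext->other_cluster_id && (ext->other_page_id < searched_page_id) )
        {
            *entry_page_id    = ext->other_page_id;
            *entry_cluster_id = ext->other_cluster_id;
        }
        else
        {
            *entry_page_id    = 0;
            *entry_cluster_id = ext->first_cluster_id;
        }
    }

    int main( void )
    {
        fatfs_inode_ext_t ext = { .first_cluster_id = 5, .other_page_id = 100, .other_cluster_id = 73 };
        uint32_t page, cluster;

        get_entry_point( &ext, 150, &page, &cluster );   /* uses the cached point (100,73)   */
        printf("start FAT scan at page %u / cluster_id %u\n", (unsigned)page, (unsigned)cluster);

        get_entry_point( &ext,  50, &page, &cluster );   /* falls back to page 0 / cluster 5 */
        printf("start FAT scan at page %u / cluster_id %u\n", (unsigned)page, (unsigned)cluster);
        return 0;
    }
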
  • trunk/kernel/fs/vfs.c

    r651 r656  
    235235
    236236#if DEBUG_VFS_INODE_CREATE
    237 char           name[CONFIG_VFS_MAX_NAME_LENGTH];
    238237uint32_t       cycle      = (uint32_t)hal_get_cycles();
    239238thread_t *     this       = CURRENT_THREAD;
    240 vfs_inode_get_name( *inode_xp , name );
    241239if( DEBUG_VFS_INODE_CREATE < cycle )
    242 printk("\n[%s] thread[%x,%x] created <%s> / inode [%x,%x] / cycle %d\n",
    243 __FUNCTION__, this->process->pid, this->trdid, name, local_cxy, inode, cycle );
     240printk("\n[%s] thread[%x,%x] created inode (%x,%x) / cycle %d\n",
     241__FUNCTION__, this->process->pid, this->trdid, local_cxy, inode, cycle );
    244242#endif
    245243 
     
    513511uint32_t   cycle = (uint32_t)hal_get_cycles();
    514512if( DEBUG_VFS_DENTRY_CREATE < cycle )
    515 printk("\n[%s] thread[%x,%x] created <%s> / dentry [%x,%x] / cycle %d\n",
     513printk("\n[%s] thread[%x,%x] created dentry <%s> : (%x,%x) / cycle %d\n",
    516514__FUNCTION__, this->process->pid, this->trdid, name, local_cxy, dentry, cycle );
    517515#endif
     
    777775}  // end vfs_open()
    778776
    779 //////////////////////////////////////
    780 int vfs_user_move( bool_t   to_buffer,
    781                    xptr_t   file_xp,
    782                    void   * buffer,
    783                    uint32_t size )
    784 {
    785     cxy_t              file_cxy;     // remote file descriptor cluster
    786     vfs_file_t       * file_ptr;     // remote file descriptor local pointer
    787     vfs_inode_type_t   inode_type;
    788     uint32_t           file_offset;  // current offset in file
    789     mapper_t         * mapper;
     777///////////////////////////////////////////
     778uint32_t vfs_user_move( bool_t   to_buffer,
     779                        xptr_t   file_xp,
     780                        void   * buffer,
     781                        uint32_t count )
     782{
     783    cxy_t              file_cxy;       // remote file descriptor cluster
     784    vfs_file_t       * file_ptr;       // remote file descriptor local pointer
     785    mapper_t         * mapper;         // local pointer on file mapper
     786    vfs_inode_t      * inode;          // local pointer on file inode
     787    vfs_inode_type_t   type;           // inode type
     788    uint32_t           offset;         // current offset in file
     789    uint32_t           size;           // current file size
      790    uint32_t           nbytes;         // number of bytes actually transferred
    790791    error_t            error;
    791792
     
    797798    file_ptr  = GET_PTR( file_xp );
    798799
    799     // get inode type from remote file descriptor
    800     inode_type = hal_remote_l32( XPTR( file_cxy , &file_ptr->type   ) );
     800    // get various infos from remote file descriptor
     801    type   = hal_remote_l32( XPTR( file_cxy , &file_ptr->type   ) );
     802    offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
     803    mapper = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
     804    inode  = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode  ) );
     805    size   = hal_remote_l32( XPTR( file_cxy , &inode->size      ) );
    801806   
    802807// check inode type
    803 assert( (inode_type == INODE_TYPE_FILE), "bad inode type" );
    804 
    805     // get mapper pointer and file offset from file descriptor
    806     file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
    807     mapper      = hal_remote_lpt( XPTR( file_cxy , &file_ptr->mapper ) );
     808assert( (type == INODE_TYPE_FILE), "bad inode type" );
    808809
    809810#if DEBUG_VFS_USER_MOVE
     
    811812uint32_t      cycle      = (uint32_t)hal_get_cycles();
    812813thread_t    * this       = CURRENT_THREAD;
    813 vfs_inode_t * inode      = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode ) );
    814814vfs_inode_get_name( XPTR( file_cxy , inode ) , name );
    815815if( cycle > DEBUG_VFS_USER_MOVE )
     
    817817    if( to_buffer )
    818818    printk("\n[%s] thread[%x,%x] enter / %d bytes / map(%s) -> buf(%x) / offset %d / cycle %d\n",
    819     __FUNCTION__ , this->process->pid, this->trdid, size, name, buffer, file_offset, cycle );
     819    __FUNCTION__ , this->process->pid, this->trdid, count, name, buffer, offset, cycle );
    820820    else           
    821821    printk("\n[%s] thread[%x,%x] enter / %d bytes / buf(%x) -> map(%s) / offset %d / cycle %d\n",
    822     __FUNCTION__ , this->process->pid, this->trdid, size, buffer, name, file_offset, cycle );
     822    __FUNCTION__ , this->process->pid, this->trdid, count, buffer, name, offset, cycle );
    823823}
    824824#endif
    825825
    826     // move data between mapper and buffer
    827     error = mapper_move_user( XPTR( file_cxy , mapper ),
    828                               to_buffer,
    829                               file_offset,
    830                               buffer,
    831                               size );
     826    if( to_buffer ) // => compute the number of bytes to move and make the move
     827    {
     828        // compute number of bytes to move
     829        if     ( size <= offset )         nbytes = 0;
     830        else if( size < offset + count )  nbytes = size - offset;
     831        else                              nbytes = count;
     832
     833        // move data from mapper to buffer when required
     834        if( nbytes > 0 )
     835        {     
     836            error = mapper_move_user( XPTR( file_cxy , mapper ),
     837                                      to_buffer,
     838                                      offset,
     839                                      buffer,
     840                                      nbytes );
     841        }
     842        else
     843        {
     844            error = 0;
     845        }
     846    }
     847    else // to mapper => make the move and update the file size if required
     848    {
     849        nbytes = count;
     850
     851        // move data from buffer to mapper
     852        error = mapper_move_user( XPTR( file_cxy , mapper ),
     853                                  to_buffer,
     854                                  offset,
     855                                  buffer,
     856                                  count );
     857
     858        // update file size in inode if required
     859        if( offset + count > size )
     860        {
     861            vfs_inode_update_size( XPTR( file_cxy , inode ) , offset + count );
     862        }
     863    }
     864       
    832865    if( error )
    833866    {
     
    837870
    838871    // update file offset in file descriptor
    839     hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->offset ) , size );
     872    hal_remote_atomic_add( XPTR( file_cxy , &file_ptr->offset ) , nbytes );
    840873
    841874#if DEBUG_VFS_USER_MOVE
     
    844877{
    845878    if( to_buffer )
    846     printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    847     __FUNCTION__ , this->process->pid, cycle );
     879    printk("\n[%s] thread[%x,%x] exit / %d bytes moved from mapper to buffer\n",
      880    __FUNCTION__ , this->process->pid, this->trdid, nbytes );
    848881    else           
    849     printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    850     __FUNCTION__ , this->process->pid, cycle );
     882    printk("\n[%s] thread[%x,%x] exit / %d bytes moved from buffer to mapper / size %d\n",
      883    __FUNCTION__ , this->process->pid, this->trdid, nbytes, hal_remote_l32(XPTR(file_cxy,&inode->size)) );
    851884}
    852885#endif
    853886
    854     return size;
     887    return nbytes;
    855888
    856889}  // end vfs_user_move()
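
The clamping rule introduced above can be summarized by a small stand-alone helper (a plain C sketch mirroring the three read cases, not the kernel function itself); a write always moves <count> bytes and raises the inode size to (offset + count) when that exceeds the current size.

    #include <stdint.h>
    #include <stdio.h>

    /* number of bytes a read can actually move, given file size, offset and request */
    static uint32_t read_nbytes( uint32_t size, uint32_t offset, uint32_t count )
    {
        if( size <= offset )        return 0;              /* offset beyond end of file    */
        if( size < offset + count ) return size - offset;  /* request truncated at EOF     */
        return count;                                      /* full request can be served   */
    }

    int main( void )
    {
        /* file of 1000 bytes */
        printf("%u\n", (unsigned)read_nbytes( 1000, 1200, 100 ));   /* 0   : offset past EOF  */
        printf("%u\n", (unsigned)read_nbytes( 1000,  950, 100 ));   /* 50  : truncated at EOF */
        printf("%u\n", (unsigned)read_nbytes( 1000,  100, 100 ));   /* 100 : fully served     */
        return 0;
    }
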
     
    24832516#endif
    24842517
     2518#if ( DEBUG_VFS_LOOKUP & 1 )
     2519if( DEBUG_VFS_LOOKUP < cycle )
     2520vfs_display( root_xp );
     2521#endif
     2522
    24852523    // compute lookup flags
    24862524    create = (lookup_mode & VFS_LOOKUP_CREATE) == VFS_LOOKUP_CREATE;
     
    25272565#endif
    25282566
    2529         // search the child dentry matching name in parent inode
     2567        // search the child dentry matching name in parent inode XHTAB
    25302568        found = vfs_get_child( parent_xp,
    25312569                               name,
     
    25932631#if (DEBUG_VFS_LOOKUP & 1)
    25942632if( DEBUG_VFS_LOOKUP < cycle )
    2595 printk("\n[%s] thread[%x,%x] created missing inode for <%s> in cluster %x\n",
     2633printk("\n[%s] thread[%x,%x] created missing inode <%s> in cluster %x\n",
    25962634__FUNCTION__, process->pid, this->trdid, name, child_cxy );
    25972635#endif
     
    27712809{
    27722810    error_t     error;
    2773     uint32_t    cluster;
     2811    uint32_t    cluster_id;
    27742812    uint32_t    child_type;
    27752813    uint32_t    child_size;
     
    27982836    vfs_inode_t  * child_ptr  = GET_PTR( child_xp );
    27992837
    2800     // 1. allocate one free cluster in file system to child inode,
     2838    // 1. allocate one free cluster_id in file system to child inode,
    28012839    // and update the File Allocation Table in both the FAT mapper and IOC device.
    28022840    // It depends on the child inode FS type.
     
    28042842
    28052843    error = vfs_fs_cluster_alloc( ctx->type,
    2806                                   &cluster );
     2844                                  &cluster_id );
    28072845    if ( error )
    28082846    {
    2809         printk("\n[ERROR] in %s : cannot find a free VFS cluster\n",
     2847        printk("\n[ERROR] in %s : cannot find a free VFS cluster_id\n",
    28102848        __FUNCTION__ );
    28112849        return -1;
     
    28142852#if( DEBUG_VFS_NEW_DENTRY_INIT & 1)
    28152853if( DEBUG_VFS_NEW_DENTRY_INIT < cycle )
    2816 printk("\n[%s] thread[%x,%x] allocated FS cluster %x to <%s>\n",
    2817 __FUNCTION__ , this->process->pid, this->trdid, cluster, child_name );
     2854printk("\n[%s] thread[%x,%x] allocated FS cluster_id %x to <%s>\n",
     2855__FUNCTION__ , this->process->pid, this->trdid, cluster_id, child_name );
    28182856#endif
    28192857
    28202858    // 2. update the child inode descriptor size and extend
    28212859    child_type = hal_remote_l32( XPTR( child_cxy , &child_ptr->type ) );
    2822     child_size = (child_type == INODE_TYPE_DIR) ? 4096 : 0;
     2860    child_size = 0;
    28232861   
    28242862    hal_remote_s32( XPTR( child_cxy , &child_ptr->size )   , child_size );
    2825     hal_remote_spt( XPTR( child_cxy , &child_ptr->extend ) , (void*)(intptr_t)cluster );
     2863    hal_remote_spt( XPTR( child_cxy , &child_ptr->extend ) , (void*)(intptr_t)cluster_id );
    28262864
    28272865    // 3. update the parent inode mapper, and
     
    32853323#if(DEBUG_VFS_ADD_CHILD & 1)
    32863324if( DEBUG_VFS_ADD_CHILD < cycle )
    3287 printk("\n[%s] thread[%x,%x] / dentry <%s> created (%x,%x)\n",
     3325printk("\n[%s] thread[%x,%x] created dentry <%s> : (%x,%x)\n",
    32883326__FUNCTION__, this->process->pid, this->trdid, name, parent_cxy, new_dentry_ptr );
    32893327#endif
     
    33323370#if(DEBUG_VFS_ADD_CHILD & 1)
    33333371if( DEBUG_VFS_ADD_CHILD < cycle )
    3334 printk("\n[%s] thread[%x,%x] / inode <%s> created (%x,%x)\n",
     3372printk("\n[%s] thread[%x,%x] created inode <%s> : (%x,%x)\n",
    33353373__FUNCTION__ , this->process->pid, this->trdid, name , child_cxy, new_inode_ptr );
    33363374#endif
     
    33453383#if(DEBUG_VFS_ADD_CHILD & 1)
    33463384if( DEBUG_VFS_ADD_CHILD < cycle )
    3347 printk("\n[%s] thread[%x,%x] link dentry(%x,%x) to child inode(%x,%x)\n",
     3385printk("\n[%s] thread[%x,%x] linked dentry(%x,%x) to child inode(%x,%x)\n",
    33483386__FUNCTION__, this->process->pid, this->trdid,
    33493387parent_cxy, new_dentry_ptr, child_cxy, new_inode_ptr );
     
    33573395#if(DEBUG_VFS_ADD_CHILD & 1)
    33583396if( DEBUG_VFS_ADD_CHILD < cycle )
    3359 printk("\n[%s] thread[%x,%x] link dentry(%x,%x) to parent inode(%x,%x)\n",
     3397printk("\n[%s] thread[%x,%x] linked dentry(%x,%x) to parent inode(%x,%x)\n",
    33603398__FUNCTION__, this->process->pid, this->trdid,
    33613399parent_cxy, new_dentry_ptr, parent_cxy, parent_inode_ptr );
     
    38143852    return error;
    38153853
    3816 } // end vfs_fs_alloc_cluster()
     3854} // end vfs_fs_cluster_alloc()
    38173855
    38183856////////////////////////////////////////////////
  • trunk/kernel/fs/vfs.h

    r635 r656  
    602602 *    and synchronously update the IOC device).
    603603 * 2. It set the "size", and "extend" fields in child inode descriptor.
     604 *    The size is 4096 for a directory, the size is 0 for a file.
    604605 * 3. It updates the parent directory mapper to introduce the new child,
    605606 *    and synchronously update the IOC device.
     
    660661 * account the offset in <file_xp>. The transfer direction is defined by <to_buffer>.
    661662 * It is called by the sys_read() and sys_write() functions.
      663 * - for a read, it checks the actual file size (registered in the inode descriptor)
      664 *   against (offset + count), and moves only the significant bytes.
      665 * - for a write, it moves all requested bytes, and updates the "inode.size" field
      666 *   in the inode descriptor when (offset + count) exceeds the current size.
    662667 ******************************************************************************************
    663668 * @ to_buffer : mapper -> buffer if true / buffer -> mapper if false.
    664669 * @ file_xp   : extended pointer on the remote file descriptor.
    665670 * @ buffer    : user space pointer on buffer (can be physically distributed).
    666  * @ size      : requested number of bytes from offset.
     671 * @ count     : requested number of bytes from offset.
    667672 * @ returns number of bytes actually moved if success / -1 if error.
    668673 *****************************************************************************************/
    669 int vfs_user_move( bool_t   to_buffer,
    670                    xptr_t   file_xp,
    671                    void   * buffer,
    672                    uint32_t size );
     674uint32_t vfs_user_move( bool_t   to_buffer,
     675                        xptr_t   file_xp,
     676                        void   * buffer,
     677                        uint32_t count );
    673678
    674679/******************************************************************************************
     
    745750 *    to the file, and removes these pages from the dirty list, using an RPC if required.
    746751 * 2) It updates the file size in all parent directory mapper(s), and update the modified
    747  *    pages on the block device, using RPCs if required.
     752 *    pages on the block device, using RPCs if required, only if the size is modified.
    748753 * 3) All entries in the fd_array copies are directly reset by the calling thread,
    749754 *    using remote accesses.
     
    895900 * argument to/from the IOC device from/to the mapper, as defined by the <cmd_type>.
    896901 * Depending on the file system type, it calls the proper, FS specific function.
    897  * It is used in case of MISS on the mapper, or when a dirty page in the mapper must
    898  * be updated in the File System.
    899  * The mapper pointer is obtained from the page descriptor.
     902 * It is used in case of MISS on the mapper (read), or when a dirty page in the mapper
     903 * must be updated in the File System (write).
     904 * The mapper pointer, and the page index in file are obtained from the page descriptor.
    900905 * It can be executed by any thread running in any cluster.
    901906 * This function does NOT take any lock.
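
    For reference, the typical caller-side use of vfs_fs_move_page() on a mapper miss,
    mirroring the updated mapper_handle_miss() in mapper.c below (sketch, not a new API):

        error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );      // synchronous read from IOC
        if( error )
        {
            printk("\n[ERROR] in %s : cannot load page from device\n", __FUNCTION__ );
            return -1;
        }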
  • trunk/kernel/kernel_config.h

    r651 r656  
    8484#define DEBUG_FATFS_RELEASE_INODE         0
    8585#define DEBUG_FATFS_REMOVE_DENTRY         0
     86#define DEBUG_FATFS_SCAN_DIRECTORY        0
    8687#define DEBUG_FATFS_SYNC_FAT              0
    8788#define DEBUG_FATFS_SYNC_FSINFO           0
     
    8990#define DEBUG_FATFS_UPDATE_DENTRY         0
    9091#define DEBUG_FATFS_UPDATE_IOC            0
     92
     93#define DEBUG_GRDXT_INSERT                0
    9194
    9295#define DEBUG_HAL_CONTEXT_FORK            0
     
    119122#define DEBUG_MAPPER_GET_PAGE             0
    120123#define DEBUG_MAPPER_HANDLE_MISS          0
     124#define DEBUG_MAPPER_MOVE_KERNEL          0
    121125#define DEBUG_MAPPER_MOVE_USER            0
    122 #define DEBUG_MAPPER_MOVE_KERNEL          0
    123126#define DEBUG_MAPPER_SYNC                 0
    124127
     
    243246#define DEBUG_VFS_FILE_CREATE             0
    244247#define DEBUG_VFS_GET_PATH                0
    245 #define DEBUG_VFS_INODE_CREATE            0
     248#define DEBUG_VFS_INODE_CREATE            0 
    246249#define DEBUG_VFS_INODE_LOAD_ALL          0
    247250#define DEBUG_VFS_KERNEL_MOVE             0
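
    For reference, these DEBUG_* values follow the convention visible throughout this
    changeset: a non-zero value is a cycle threshold that activates the trace, and the
    low-order bits (tested with & 1 or & 2) enable extra dumps. A representative pattern,
    copied from the updated mapper.c below:

        #if DEBUG_MAPPER_GET_PAGE
        uint32_t cycle = (uint32_t)hal_get_cycles();
        if( DEBUG_MAPPER_GET_PAGE < cycle )              // trace active after this cycle
        printk("\n[%s] thread[%x,%x] enter\n", __FUNCTION__, this->process->pid, this->trdid );
        #endif

        #if( DEBUG_MAPPER_GET_PAGE & 2 )                 // bit 1 : also dump the local PPM state
        if( DEBUG_MAPPER_GET_PAGE < cycle )
        ppm_remote_display( local_cxy );
        #endif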
  • trunk/kernel/libk/grdxt.c

    r635 r656  
    315315                else                   
    316316                {
    317                     *found_key = (ix1 << (w2+w3)) | (ix2 << w1) | ix3;
     317                    *found_key = (ix1 << (w2+w3)) | (ix2 << w3) | ix3;
    318318                    return ptr3[ix3];
    319319                }
     
    343343    grdxt_t * rt_ptr = GET_PTR( rt_xp );
    344344
     345#if DEBUG_GRDXT_INSERT
     346uint32_t cycle = (uint32_t)hal_get_cycles();
     347if(DEBUG_GRDXT_INSERT < cycle)
     348printk("\n[%s] enter / rt_xp (%x,%x) / key %x / value %x\n",
     349__FUNCTION__, rt_cxy, rt_ptr, key, (intptr_t)value );
     350#endif
     351
    345352    // get widths
    346353    uint32_t        w1 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix1_width ) );
     
    348355    uint32_t        w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) );
    349356
     357#if DEBUG_GRDXT_INSERT
     358if(DEBUG_GRDXT_INSERT < cycle)
     359printk("\n[%s] get widths : w1 %d / w2 %d / w3 %d\n",
     360__FUNCTION__, w1, w2, w3 );
     361#endif
     362
    350363// Check key value
    351364assert( ((key >> (w1 + w2 + w3)) == 0 ), "illegal key value %x\n", key );
     
    356369        uint32_t        ix3 = key & ((1 << w3) - 1);         // index in level 3 array
    357370
     371#if DEBUG_GRDXT_INSERT
     372if(DEBUG_GRDXT_INSERT < cycle)
     373printk("\n[%s] compute indexes : ix1 %d / ix2 %d / ix3 %d\n",
     374__FUNCTION__, ix1, ix2, ix3 );
     375#endif
     376
    358377    // get ptr1
    359378    void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) );
     
    361380    if( ptr1 == NULL ) return -1;
    362381
     382#if DEBUG_GRDXT_INSERT
     383if(DEBUG_GRDXT_INSERT < cycle)
     384printk("\n[%s] compute ptr1 = %x\n",
     385__FUNCTION__, (intptr_t)ptr1 );
     386#endif
     387
    363388    // get ptr2
    364389    void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) );
     390
     391#if DEBUG_GRDXT_INSERT
     392if(DEBUG_GRDXT_INSERT < cycle)
     393printk("\n[%s] get current ptr2 = %x\n",
     394__FUNCTION__, (intptr_t)ptr2 );
     395#endif
    365396
    366397    // allocate memory for the missing level_2 array if required
     
    374405
    375406        if( ptr2 == NULL ) return -1;
    376 
     407       
    377408        // update level_1 entry
    378409        hal_remote_spt( XPTR( rt_cxy , &ptr1[ix1] ) , ptr2 );
     410
     411#if DEBUG_GRDXT_INSERT
     412if(DEBUG_GRDXT_INSERT < cycle)
     413printk("\n[%s] update ptr1[%d] : &ptr1[%d] = %x / ptr2 = %x\n",
     414__FUNCTION__, ix1, ix1, &ptr1[ix1], ptr2 );
     415#endif
     416
    379417    }
    380418
    381419    // get ptr3
    382420    void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) );
     421
     422#if DEBUG_GRDXT_INSERT
     423if(DEBUG_GRDXT_INSERT < cycle)
     424printk("\n[%s] get current ptr3 = %x\n",
     425__FUNCTION__, (intptr_t)ptr3 );
     426#endif
    383427
    384428    // allocate memory for the missing level_3 array if required
     
    395439        // update level_2 entry
    396440        hal_remote_spt( XPTR( rt_cxy , &ptr2[ix2] ) , ptr3 );
     441
     442#if DEBUG_GRDXT_INSERT
     443if(DEBUG_GRDXT_INSERT < cycle)
     444printk("\n[%s] update  ptr2[%d] : &ptr2[%d] %x / ptr3 %x\n",
     445__FUNCTION__, ix2, ix2, &ptr2[ix2], ptr3 );
     446#endif
     447
    397448    }
    398449
    399450    // register value in level_3 array
    400451    hal_remote_spt( XPTR( rt_cxy , &ptr3[ix3] ) , value );
     452
     453#if DEBUG_GRDXT_INSERT
     454if(DEBUG_GRDXT_INSERT < cycle)
     455printk("\n[%s] update  ptr3[%d] : &ptr3[%d] %x / value %x\n",
     456__FUNCTION__, ix3, ix3, &ptr3[ix3], value );
     457#endif
    401458
    402459    hal_fence();
     
    498555        uint32_t       ix3;
    499556
     557    void        ** ptr1;
     558    void        ** ptr2;
     559    void        ** ptr3;
     560
    500561// check rt_xp
    501562assert( (rt_xp != XPTR_NULL) , "pointer on radix tree is NULL\n" );
     
    510571    uint32_t       w3 = hal_remote_l32( XPTR( rt_cxy , &rt_ptr->ix3_width ) );
    511572
    512     void ** ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) );
     573    ptr1 = hal_remote_lpt( XPTR( rt_cxy , &rt_ptr->root ) );
    513574
    514575        printk("\n***** Generic Radix Tree for <%s>\n", name );
     
    516577        for( ix1=0 ; ix1 < (uint32_t)(1<<w1) ; ix1++ )
    517578        {
    518             void ** ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) );
     579            ptr2 = hal_remote_lpt( XPTR( rt_cxy , &ptr1[ix1] ) );
    519580        if( ptr2 == NULL )  continue;
    520581   
    521582        for( ix2=0 ; ix2 < (uint32_t)(1<<w2) ; ix2++ )
    522583        {
    523                 void ** ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) );
     584                ptr3 = hal_remote_lpt( XPTR( rt_cxy , &ptr2[ix2] ) );
    524585            if( ptr3 == NULL ) continue;
    525586
     
    530591
    531592                uint32_t key = (ix1<<(w2+w3)) + (ix2<<w3) + ix3;
    532                 printk(" - key = %x / value = %x\n", key , (intptr_t)value );
     593                printk(" - key = %x / value = %x / ptr1 = %x / ptr2 = %x / ptr3 = %x\n",
     594                key, (intptr_t)value, (intptr_t)ptr1, (intptr_t)ptr2, (intptr_t)ptr3 );
    533595            }
    534596        }
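
    The grdxt_get_first() fix above replaces (ix2 << w1) by (ix2 << w3) when rebuilding
    <found_key>. A standalone sketch of the 3-level key split and recomposition (the
    widths and key value are arbitrary examples):

        #include <stdint.h>
        #include <assert.h>

        int main( void )
        {
            uint32_t w1 = 6, w2 = 7, w3 = 8;                   /* example widths          */
            uint32_t key = 0x12345;                            /* example key             */

            uint32_t ix1 = key >> (w2 + w3);                   /* index in level 1 array  */
            uint32_t ix2 = (key >> w3) & ((1 << w2) - 1);      /* index in level 2 array  */
            uint32_t ix3 = key & ((1 << w3) - 1);              /* index in level 3 array  */

            /* recomposition, as now done for <found_key> */
            uint32_t found_key = (ix1 << (w2 + w3)) | (ix2 << w3) | ix3;

            assert( found_key == key );
            return 0;
        }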
  • trunk/kernel/libk/grdxt.h

    r635 r656  
    6161/*******************************************************************************************
    6262 * This function initialises the radix-tree descriptor,
     63 * and allocates memory for the first level array of pointers.
    6364 * It must be called by a local thread.
    64  * and allocates memory for the first level array of pointers.
    6565 *******************************************************************************************
    6666 * @ rt        : pointer on the radix-tree descriptor.
     
    7777/*******************************************************************************************
    7878 * This function releases all memory allocated to the radix-tree infrastructure.
     79 * A warning message is printed on the kernel TXT0 if the radix tree is not empty.
    7980 * It must be called by a local thread.
    80  * A warning message is printed on the kernel TXT0 if the radix tree is not empty.
    8181 *******************************************************************************************
    8282 * @ rt      : pointer on the radix-tree descriptor.
     
    8686/*******************************************************************************************
     8787 * This function inserts a new item in the radix-tree.
     88 * It dynamically allocates memory for new second and third level arrays if required.
    8889 * It must be called by a local thread.
    89  * It dynamically allocates memory for new second and third level arrays if required.
    9090 *******************************************************************************************
    9191 * @ rt      : pointer on the radix-tree descriptor.
     
    100100/*******************************************************************************************
    101101 * This function removes an item identified by its key from the radix tree,
     102 * and returns a pointer on the removed item. No memory is released.
    102103 * It must be called by a local thread.
    103  * and returns a pointer on the removed item. No memory is released.
    104104 *******************************************************************************************
    105105 * @ rt      : pointer on the radix-tree descriptor.
     
    124124/*******************************************************************************************
     125125 * This function scans all radix-tree entries in increasing key order, starting from
      126 * the value defined by the <start_key> argument, and returns a pointer on the first valid
     127 * registered item, and the found item key value.
    126128 * It must be called by a local thread.
    127  * the value defined by the <key> argument, and return a pointer on the first valid
    128  * registered item, and the found item key value.
    129129 *******************************************************************************************
    130130 * @ rt         : pointer on the radix-tree descriptor.
    131131 * @ start_key  : key starting value for the scan.
    132132 * @ found_key  : [out] buffer for found key value.
    133  * @ return pointer on first valid item if found / return NULL if not found.
     133 * @ return pointer on first valid item if found / return NULL if no item found.
    134134 ******************************************************************************************/
    135135void * grdxt_get_first( grdxt_t  * rt,
  • trunk/kernel/libk/list.h

    r651 r656  
    11/*
    2  * list.h - Double circular linked list
     2 * list.h - Local double circular linked list, using local pointers.
    33 *
    44 * Authors Ghassan Almaless  (2008,2009,2010,2011,2012)
     
    9191
    9292/***************************************************************************
    93  * This macro returns t pointer on the first element of a list.
     93 * This macro returns a pointer on the first element of a list.
    9494 ***************************************************************************
    9595 * @ root     : pointer on the list root
     
    171171                                   list_entry_t * entry )
    172172{
    173     list_entry_t * next = root->next; 
    174 
    175         entry->next = next;
     173    list_entry_t * first = root->next; 
     174
     175        entry->next = first;
    176176        entry->pred = root;
    177177 
    178         root->next = entry;
    179         next->pred = entry;
     178        root->next  = entry;
     179        first->pred = entry;
    180180}
    181181
     
    190190                                  list_entry_t * entry )
    191191{
    192     list_entry_t * pred = root->pred;
     192    list_entry_t * last = root->pred;
    193193
    194194        entry->next = root;
    195         entry->pred = pred;
     195        entry->pred = last;
    196196 
    197197        root->pred = entry;
    198         pred->next = entry;
     198        last->next = entry;
    199199}
    200200
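
    A standalone sketch of the circular-list invariant maintained by the functions above,
    using the same next/pred linkage (local reimplementation for illustration only):

        #include <assert.h>

        typedef struct list_entry_s
        {
            struct list_entry_s * next;
            struct list_entry_s * pred;
        } list_entry_t;

        static void list_add_first( list_entry_t * root , list_entry_t * entry )
        {
            list_entry_t * first = root->next;
            entry->next = first;
            entry->pred = root;
            root->next  = entry;
            first->pred = entry;
        }

        int main( void )
        {
            list_entry_t root , a , b;
            root.next = &root;  root.pred = &root;      /* empty list : root points to itself */

            list_add_first( &root , &a );               /* list is now (a)     */
            list_add_first( &root , &b );               /* list is now (b , a) */

            assert( (root.next == &b) && (b.next == &a) && (a.next == &root) );
            assert( (root.pred == &a) && (a.pred == &b) && (b.pred == &root) );
            return 0;
        }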
     
    366366                                          list_entry_t * entry )
    367367{
    368     list_entry_t * next = hal_remote_lpt( XPTR( cxy , &root->next ) );
     368    list_entry_t * first = hal_remote_lpt( XPTR( cxy , &root->next ) );
    369369       
    370         hal_remote_spt( XPTR( cxy , &entry->next ) , next );
     370        hal_remote_spt( XPTR( cxy , &entry->next ) , first );
    371371        hal_remote_spt( XPTR( cxy , &entry->pred ) , root );
    372372 
    373         hal_remote_spt( XPTR( cxy , &root->next ) , entry );
    374         hal_remote_spt( XPTR( cxy , &next->pred ) , entry );
     373        hal_remote_spt( XPTR( cxy , &root->next )  , entry );
     374        hal_remote_spt( XPTR( cxy , &first->pred ) , entry );
    375375}
    376376
     
    387387                                         list_entry_t * entry )
    388388{
    389     list_entry_t * pred = hal_remote_lpt( XPTR( cxy , &root->pred ) );
     389    list_entry_t * last = hal_remote_lpt( XPTR( cxy , &root->pred ) );
    390390       
    391391        hal_remote_spt( XPTR( cxy , &entry->next ) , root );
    392         hal_remote_spt( XPTR( cxy , &entry->pred ) , pred );
     392        hal_remote_spt( XPTR( cxy , &entry->pred ) , last );
    393393 
    394394        hal_remote_spt( XPTR( cxy , &root->pred ) , entry );
    395         hal_remote_spt( XPTR( cxy , &pred->next ) , entry );
     395        hal_remote_spt( XPTR( cxy , &last->next ) , entry );
    396396}
    397397
     
    401401 ***************************************************************************
    402402 * @ cxy     : remote list cluster identifier
    403  * @ entry   : pointer on the entry to be removed.
     403 * @ entry   : local pointer on the remote entry to be removed.
    404404 **************************************************************************/
    405405static inline void list_remote_unlink( cxy_t          cxy,
  • trunk/kernel/libk/xlist.h

    r636 r656  
    11/*
    2  * xlist.h - Double Circular Linked lists, using extended pointers.
     2 * xlist.h - Trans-cluster double circular linked list, using extended pointers.
    33 *
    44 * Author : Alain Greiner (2016,2017,2018,2019)
  • trunk/kernel/mm/kcm.c

    r635 r656  
    509509        {
    510510        // get one 4 Kbytes page from remote PPM
    511         page_t * page = ppm_remote_alloc_pages( kcm_cxy , 0 );
    512 
    513             if( page == NULL )
     511        xptr_t page_xp = ppm_remote_alloc_pages( kcm_cxy , 0 );
     512
     513            if( page_xp == XPTR_NULL )
    514514            {
    515515                    printk("\n[ERROR] in %s : failed to allocate page in cluster %x\n",
     
    519519        }
    520520
    521             // get remote page base address
    522             xptr_t base_xp = ppm_page2base( XPTR( kcm_cxy , page ) );
     521            // get extended pointer on allocated buffer
     522            xptr_t base_xp = ppm_page2base( page_xp );
    523523
    524524        // get local pointer on kcm_page
     
    529529            hal_remote_s64( XPTR( kcm_cxy , &kcm_page->status ) , 0 );
    530530            hal_remote_spt( XPTR( kcm_cxy , &kcm_page->kcm )    , kcm_ptr );
    531             hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , page );
     531            hal_remote_spt( XPTR( kcm_cxy , &kcm_page->page )   , GET_PTR( page_xp ) );
    532532
    533533            // introduce new page in remote KCM active_list
  • trunk/kernel/mm/kmem.c

    r635 r656  
    4545        flags = req->flags;
    4646
    47     ////////////////////////////////// PPM
     47    //////////////////////
    4848        if( type == KMEM_PPM )
    4949        {
     
    7676        return ptr;
    7777        }
    78     ///////////////////////////////////// KCM
     78    ///////////////////////////
    7979        else if( type == KMEM_KCM )
    8080        {
     
    102102        return ptr;
    103103        }
    104     //////////////////////////////////// KHM
     104    ///////////////////////////
    105105        else if( type == KMEM_KHM )
    106106        {
     
    140140    uint32_t type = req->type;
    141141
     142    //////////////////////
    142143        if( type == KMEM_PPM )
    143144        {
     
    146147        ppm_free_pages( page );
    147148    }
     149    ///////////////////////////
    148150    else if( type == KMEM_KCM )
    149151    {
    150152        kcm_free( req->ptr );
    151153        }
     154    ///////////////////////////
    152155    else if( type == KMEM_KHM )
    153156    {
     
    172175        flags = req->flags;
    173176
    174         ///////////////////////////////// PPM
    175         if( type == KMEM_PPM )
    176         {
    177                 // allocate the number of requested pages
    178                 page_t * page_ptr = ppm_remote_alloc_pages( cxy , order );
    179 
    180                 if( page_ptr == NULL )
     177        //////////////////////
     178        if( type == KMEM_PPM )
     179        {
     180                // allocate the number of requested pages from remote cluster
     181                xptr_t page_xp = ppm_remote_alloc_pages( cxy , order );
     182
     183                if( page_xp == XPTR_NULL )
    181184                {
    182185                        printk("\n[ERROR] in %s : failed for PPM / order %d in cluster %x\n",
     
    185188                }
    186189
    187         xptr_t page_xp = XPTR( cxy , page_ptr );
    188 
    189         // get pointer on buffer from the page descriptor
     190        // get extended pointer on remote buffer
    190191        xptr_t base_xp = ppm_page2base( page_xp );
    191192
     
    193194                if( flags & AF_ZERO ) hal_remote_memset( base_xp , 0 , CONFIG_PPM_PAGE_SIZE );
    194195
    195         void * ptr = GET_PTR( base_xp );
    196196
    197197#if DEBUG_KMEM_REMOTE
     
    201201printk("\n[%s] thread[%x,%x] from PPM / %d page(s) / ppn %x / cxy %x / cycle %d\n",
    202202__FUNCTION__, this->process->pid, this->trdid,
    203 1<<order, ppm_page2ppn(XPTR(local_cxy,ptr)), cxy, cycle );
    204 #endif
    205         return ptr;
    206         }
    207     /////////////////////////////////// KCM
     2031<<order, ppm_page2ppn( page_xp ), cxy, cycle );
     204#endif
     205        return GET_PTR( base_xp );
     206        }
     207    ///////////////////////////
    208208        else if( type == KMEM_KCM )
    209209        {
     
    231231        return ptr;
    232232        }
    233         /////////////////////////////////// KHM
     233        ///////////////////////////
    234234        else if( type == KMEM_KHM )               
    235235        {
     
    250250    uint32_t type = req->type;
    251251
     252    //////////////////////
    252253        if( type == KMEM_PPM )
    253254        {
     
    256257        ppm_remote_free_pages( cxy , page );
    257258    }
     259    ///////////////////////////
    258260    else if( type == KMEM_KCM )
    259261    {
    260262        kcm_remote_free( cxy , req->ptr );
    261263        }
     264    ///////////////////////////
    262265    else if( type == KMEM_KHM )
    263266    {
  • trunk/kernel/mm/kmem.h

    r635 r656  
    2929
    3030/*************************************************************************************
     3131 * This enum defines the three Kernel Memory Allocator types:
      31 * This enum defines the three Kernel Memory Allocator types
    3232 ************************************************************************************/
    3333
     
    7171 * - KHM (Kernel Heap Manager) allocates physical memory buffers of M bytes,
    7272 *       M can have any value, and req.order = M.
     73 *
     74 * WARNING: the physical memory allocated with a given allocator type must be
     75 *          released using the same allocator type.
    7376 *************************************************************************************
    7477 * @ cxy   : target cluster identifier for a remote access.
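
    For reference, the release pattern matching this warning, as used by the updated
    vmm_ppn_release() in vmm.c below: a page obtained from the PPM allocator is given
    back with a KMEM_PPM request (sketch, field names as in the diff):

        kmem_req_t req;
        req.type = KMEM_PPM;                               // must match the allocator type used
        req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );         // base address of the released page
        kmem_remote_free( page_cxy , &req );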
  • trunk/kernel/mm/mapper.c

    r651 r656  
    2727#include <hal_special.h>
    2828#include <hal_uspace.h>
     29#include <hal_vmm.h>
    2930#include <grdxt.h>
    3031#include <string.h>
     
    141142    error_t    error;
    142143
     144    uint32_t   inode_size;   
     145    uint32_t   inode_type;
     146
    143147    thread_t * this = CURRENT_THREAD;
    144148
    145149    // get target mapper cluster and local pointer
    146     cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    147     mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     150    cxy_t         mapper_cxy = GET_CXY( mapper_xp );
     151    mapper_t    * mapper_ptr = GET_PTR( mapper_xp );
     152
     153    // get inode pointer
     154    vfs_inode_t * inode = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     155
     156    // get inode size and type if relevant
     157    if( inode != NULL )
     158    {
     159        inode_size = hal_remote_l32( XPTR( mapper_cxy , &inode->size ) );
     160        inode_type = hal_remote_l32( XPTR( mapper_cxy , &inode->type ) );
     161    }
     162    else
     163    {
     164        inode_size = 0;
     165        inode_type = 0;
     166    }
    148167
    149168#if DEBUG_MAPPER_HANDLE_MISS
    150169uint32_t      cycle = (uint32_t)hal_get_cycles();
    151170char          name[CONFIG_VFS_MAX_NAME_LENGTH];
    152 vfs_inode_t * inode = mapper->inode;
    153171if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
    154172{
    155     vfs_inode_get_name( XPTR( local_cxy , inode ) , name );
    156     printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cluster %x / cycle %d",
     173    vfs_inode_get_name( XPTR( mapper_cxy , inode ) , name );
     174    printk("\n[%s] thread[%x,%x] enter for page %d in <%s> / cxy %x / cycle %d\n",
    157175    __FUNCTION__, this->process->pid, this->trdid, page_id, name, mapper_cxy, cycle );
    158     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), name );
    159176}
    160177if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
    161178{
    162     printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cluster %x / cycle %d",
     179    printk("\n[%s] thread[%x,%x] enter for page %d in FAT / cxy %x / cycle %d\n",
    163180    __FUNCTION__, this->process->pid, this->trdid, page_id, mapper_cxy, cycle );
    164     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt), "FAT" );
     181}
     182#endif
     183
     184#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     185if( DEBUG_MAPPER_HANDLE_MISS < cycle )
     186{
     187    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
     188    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
    165189}
    166190#endif
    167191
    168192    // allocate one 4 Kbytes page from the remote mapper cluster
    169     page_t * page_ptr = ppm_remote_alloc_pages( mapper_cxy , 0 );
     193    xptr_t page_xp = ppm_remote_alloc_pages( mapper_cxy , 0 );
     194    page_t * page_ptr = GET_PTR( page_xp );
    170195                           
    171     if( page_ptr == NULL )
     196    if( page_xp == XPTR_NULL )
    172197    {
    173198        printk("\n[ERROR] in %s : thread [%x,%x] cannot allocate page in cluster %x\n",
     
    176201    }
    177202
    178     // build extended pointer on new page descriptor
    179     xptr_t page_xp = XPTR( mapper_cxy , page_ptr );
    180 
    181203    // initialize the page descriptor
    182204    page_remote_init( page_xp );
    183205
     206    // initialize specific page descriptor fields
    184207    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->refcount ) , 1          );
    185208    hal_remote_s32( XPTR( mapper_cxy , &page_ptr->index )    , page_id    );
     
    200223    }
    201224
    202     // launch I/O operation to load page from IOC device to mapper
    203     error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
    204 
    205     if( error )
    206     {
    207         printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
    208         __FUNCTION__ , this->process->pid, this->trdid );
    209         mapper_remote_release_page( mapper_xp , page_ptr );
    210         return -1;
     225    // launch I/O operation to load page from IOC device when required:
     226    // - it is the FAT mapper
     227    // - it is a directory mapper
      228    // - it is a file mapper, and data exists on the IOC device for this page
     229    if( (inode == NULL) || (inode_type == INODE_TYPE_DIR) || (inode_size > (page_id << 10) ) )
     230    {
     231        error = vfs_fs_move_page( page_xp , IOC_SYNC_READ );
     232
     233        if( error )
     234        {
     235            printk("\n[ERROR] in %s : thread[%x,%x] cannot load page from device\n",
     236            __FUNCTION__ , this->process->pid, this->trdid );
     237            mapper_remote_release_page( mapper_xp , page_ptr );
     238            return -1;
     239         }
    211240    }
    212241
     
    215244
    216245#if DEBUG_MAPPER_HANDLE_MISS
    217 cycle = (uint32_t)hal_get_cycles();
     246ppn_t ppn = ppm_page2ppn( page_xp );
    218247if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode != NULL) )
    219248{
    220     printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / ppn %x / cycle %d",
    221     __FUNCTION__, this->process->pid, this->trdid,
    222     page_id, name, ppm_page2ppn( page_xp ), cycle );
    223     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt) , name );
     249    printk("\n[%s] thread[%x,%x] exit for page %d in <%s> / page %x / ppn %x\n",
     250    __FUNCTION__, this->process->pid, this->trdid, page_id, name, page_ptr, ppn );
    224251}
    225252if( (DEBUG_MAPPER_HANDLE_MISS < cycle) && (inode == NULL) )
    226253{
    227     printk("\n[%s] thread[%x,%x] exit for page %d in FAT / ppn %x / cycle %d",
    228     __FUNCTION__, this->process->pid, this->trdid,
    229     page_id, ppm_page2ppn( page_xp ), cycle );
    230     if( DEBUG_MAPPER_HANDLE_MISS & 1 ) grdxt_display( XPTR(local_cxy,&mapper->rt ), "FAT" );
     254    printk("\n[%s] thread[%x,%x] exit for page %d in FAT / page %x / ppn %x\n",
     255    __FUNCTION__, this->process->pid, this->trdid, page_id, page_ptr, ppn );
     256}
     257#endif
     258
     259#if( DEBUG_MAPPER_HANDLE_MISS & 2 )
     260if( DEBUG_MAPPER_HANDLE_MISS < cycle )
     261{
     262    if (inode != NULL) grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , name );
     263    else               grdxt_remote_display( XPTR( mapper_cxy , &mapper_ptr->rt ) , "FAT" );
    231264}
    232265#endif
     
    241274{
    242275    error_t       error;
    243     mapper_t    * mapper_ptr;
    244     cxy_t         mapper_cxy;
    245     xptr_t        lock_xp;        // extended pointer on mapper lock
    246     xptr_t        page_xp;        // extended pointer on searched page descriptor
    247     xptr_t        rt_xp;          // extended pointer on radix tree in mapper
    248276
    249277    thread_t * this = CURRENT_THREAD;
    250278
    251279    // get mapper cluster and local pointer
    252     mapper_ptr = GET_PTR( mapper_xp );
    253     mapper_cxy = GET_CXY( mapper_xp );
     280    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     281    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
    254282
    255283#if DEBUG_MAPPER_GET_PAGE
     
    270298#endif
    271299
     300#if( DEBUG_MAPPER_GET_PAGE & 2 )
     301if( DEBUG_MAPPER_GET_PAGE < cycle )
     302ppm_remote_display( local_cxy );
     303#endif
     304
    272305    // check thread can yield
    273306    thread_assert_can_yield( this , __FUNCTION__ );
    274307
    275308    // build extended pointer on mapper lock and mapper rt
    276     lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
    277     rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
     309    xptr_t lock_xp  = XPTR( mapper_cxy , &mapper_ptr->lock );
     310    xptr_t rt_xp    = XPTR( mapper_cxy , &mapper_ptr->rt );
    278311
    279312    // take mapper lock in READ_MODE
     
    281314
    282315    // search page in radix tree
    283     page_xp  = grdxt_remote_lookup( rt_xp , page_id );
     316    xptr_t page_xp  = grdxt_remote_lookup( rt_xp , page_id );
    284317
    285318    // test mapper miss
     
    310343
    311344#if (DEBUG_MAPPER_GET_PAGE & 1)
    312 if( DEBUG_MAPPER_GET_PAGE < cycle )
    313 printk("\n[%s] thread[%x,%x] load missing page from FS : ppn %x\n",
    314 __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
     345if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
     346{
     347    printk("\n[%s] thread[%x,%x] introduced missing page in <%s> mapper / ppn %x\n",
     348    __FUNCTION__, this->process->pid, this->trdid, name, ppm_page2ppn(page_xp) );
     349}
     350if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
     351{
     352    printk("\n[%s] thread[%x,%x] introduced missing page in FAT mapper / ppn %x\n",
     353    __FUNCTION__, this->process->pid, this->trdid, ppm_page2ppn(page_xp) );
     354}
    315355#endif
    316356       
     
    328368if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode != NULL) )
    329369{
    330     printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x / cycle %d\n",
    331     __FUNCTION__, this->process->pid, this->trdid, page_id,
    332     name, ppm_page2ppn(page_xp), cycle );
     370    printk("\n[%s] thread[%x,%x] exit for page %d of <%s> mapper / ppn %x\n",
     371    __FUNCTION__, this->process->pid, this->trdid, page_id, name, ppm_page2ppn(page_xp) );
    333372}
    334373if( (DEBUG_MAPPER_GET_PAGE < cycle) && (inode == NULL) )
    335374{
    336     printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x / cycle %d\n",
    337     __FUNCTION__, this->process->pid, this->trdid, page_id,
    338     ppm_page2ppn(page_xp), cycle );
    339 }
     375    printk("\n[%s] thread[%x,%x] exit for page %d of FAT mapper  / ppn %x\n",
     376    __FUNCTION__, this->process->pid, this->trdid, page_id, ppm_page2ppn(page_xp) );
     377}
     378#endif
     379
     380#if( DEBUG_MAPPER_GET_PAGE & 2)
     381if( DEBUG_MAPPER_GET_PAGE < cycle )
     382ppm_remote_display( local_cxy );
    340383#endif
    341384
     
    476519__FUNCTION__, this->process->pid, this->trdid, page_bytes,
    477520local_cxy, buf_ptr, name, GET_CXY(map_xp), GET_PTR(map_xp) );
    478 mapper_display_page(  mapper_xp , page_id, 128 );
     521mapper_display_page(  mapper_xp , page_xp , 128 );
    479522#endif
    480523
     
    600643{
    601644    if( to_buffer )
    602     printk("\n[%s] mapper <%s> page %d => buffer(%x,%x) / %d bytes\n",
     645    printk("\n[%s] mapper <%s> page %d => buffer (%x,%x) / %d bytes\n",
    603646    __FUNCTION__, name, page_id, dst_cxy, dst_ptr, page_bytes );
    604647    else
    605     printk("\n[%s] buffer(%x,%x) => mapper <%s> page %d / %d bytes\n",
     648    printk("\n[%s] buffer (%x,%x) => mapper <%s> page %d / %d bytes\n",
    606649    __FUNCTION__, src_cxy, src_ptr, name, page_id, page_bytes );
    607650}
     
    617660cycle  = (uint32_t)hal_get_cycles();
    618661if( DEBUG_MAPPER_MOVE_KERNEL < cycle )
    619 printk("\n[%s] thread[%x,%x] exit / cycle %d\n",
    620 __FUNCTION__, this->process->pid, this->trdid, cycle );
     662printk("\n[%s] thread[%x,%x] exit / mapper <%s> / buffer (%x,%x) / cycle %d\n",
     663__FUNCTION__, this->process->pid, this->trdid, name, buffer_cxy, buffer_ptr, cycle );
    621664#endif
    622665
     
    707750        if( page == NULL ) break;
    708751
    709 assert( (page->index == found_key ), "wrong page descriptor index" );
    710 assert( (page->order == 0),          "mapper page order must be 0" );
     752assert( (page->index == found_key ), "page_index (%d) != key (%d)", page->index, found_key );
      753assert( (page->order == 0), "page_order (%d) != 0", page->order );
    711754
    712755        // build extended pointer on page descriptor
     
    753796}  // end mapper_sync()
    754797
    755 //////////////////////////////////////////////////
    756 error_t mapper_display_page( xptr_t     mapper_xp,
    757                              uint32_t   page_id,
    758                              uint32_t   nbytes )
    759 {
    760     xptr_t        page_xp;        // extended pointer on page descriptor
    761     xptr_t        base_xp;        // extended pointer on page base
     798///////////////////////////////////////////////
     799void mapper_display_page( xptr_t     mapper_xp,
     800                          xptr_t     page_xp,
     801                          uint32_t   nbytes )
     802{
    762803    char          buffer[4096];   // local buffer
    763     uint32_t    * tabi;           // pointer on uint32_t to scan buffer
    764804    uint32_t      line;           // line index
    765805    uint32_t      word;           // word index
    766     cxy_t         mapper_cxy;     // mapper cluster identifier
    767     mapper_t    * mapper_ptr;     // mapper local pointer
    768     vfs_inode_t * inode_ptr;      // inode local pointer
    769806 
    770807    char       name[CONFIG_VFS_MAX_NAME_LENGTH];
    771808
    772     if( nbytes > 4096)
    773     {
    774         printk("\n[ERROR] in %s : nbytes (%d) cannot be larger than 4096\n",
    775         __FUNCTION__, nbytes );
    776         return -1;
    777     }
    778    
    779     // get extended pointer on page descriptor
    780     page_xp = mapper_remote_get_page( mapper_xp , page_id );
    781 
    782     if( page_xp == XPTR_NULL)
    783     {
    784         printk("\n[ERROR] in %s : cannot access page %d in mapper\n",
    785         __FUNCTION__, page_id );
    786         return -1;
    787     }
    788 
    789     // get cluster and local pointer
    790     mapper_cxy = GET_CXY( mapper_xp );
    791     mapper_ptr = GET_PTR( mapper_xp );
     809assert( (nbytes <= 4096)         , "nbytes cannot be larger than 4096");
     810assert( (mapper_xp != XPTR_NULL) , "mapper_xp argument cannot be null");
     811assert( (page_xp   != XPTR_NULL) , "page_xp argument cannot be null");
     812
     813    // get mapper cluster and local pointer
     814    cxy_t      mapper_cxy = GET_CXY( mapper_xp );
     815    mapper_t * mapper_ptr = GET_PTR( mapper_xp );
     816
      818    // get page cluster and local pointer
     818    cxy_t    page_cxy = GET_CXY( page_xp );
     819    page_t * page_ptr = GET_PTR( page_xp );
     820
     821    // get page_id and mapper from page descriptor
     822    uint32_t   page_id = hal_remote_l32( XPTR( page_cxy , &page_ptr->index ) );
     823    mapper_t * mapper  = hal_remote_lpt( XPTR( page_cxy , &page_ptr->mapper ) );
     824
     825assert( (mapper_cxy == page_cxy ) , "mapper and page must be in same cluster");
      826assert( (mapper_ptr == mapper   ) , "inconsistent mapper_xp & page_xp arguments");
    792827
    793828    // get inode
    794     inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
     829    vfs_inode_t * inode_ptr = hal_remote_lpt( XPTR( mapper_cxy , &mapper_ptr->inode ) );
    795830
    796831    // get inode name
    797     if( inode_ptr == NULL ) strcpy( name , "fat" );
     832    if( inode_ptr == NULL ) strcpy( name , "FAT" );
    798833    else  vfs_inode_get_name( XPTR( mapper_cxy , inode_ptr ) , name );
    799834   
    800835    // get extended pointer on page base
    801     base_xp = ppm_page2base( page_xp );
     836    xptr_t base_xp = ppm_page2base( page_xp );
    802837   
    803838    // copy remote page to local buffer
    804839    hal_remote_memcpy( XPTR( local_cxy , buffer ) , base_xp , nbytes );
    805840
     841    // display header
     842    uint32_t * tabi = (uint32_t *)buffer;
     843    printk("\n***** mapper <%s> / page_id %d / cxy %x / mapper %x / buffer %x\n",
     844    name, page_id, mapper_cxy, mapper_ptr, GET_PTR( base_xp ) );
     845
    806846    // display 8 words per line
    807     tabi = (uint32_t *)buffer;
    808     printk("\n***** mapper <%s> / %d bytes in page %d (%x,%x)\n",
    809     name, nbytes, page_id, GET_CXY(base_xp), GET_PTR(base_xp) );
    810847    for( line = 0 ; line < (nbytes >> 5) ; line++ )
    811848    {
     
    815852    }
    816853
    817     return 0;
    818 
    819 }  // end mapper_display_page
    820 
    821 
     854}  // end mapper_display_page()
     855
     856
  • trunk/kernel/mm/mapper.h

    r635 r656  
    6262 *   and the  allocated memory is only released when the mapper/inode is destroyed.
    6363 *
    64  * TODO (1) the mapper being only used to implement the VFS cache(s), the mapper.c
    65  *          and mapper.h file should be trandfered to the fs directory.
    66  * TODO (2) the "type" field in mapper descriptor is redundant and probably unused.
     64 * TODO the "type" field in mapper descriptor is redundant and probably unused.
    6765 ******************************************************************************************/
    6866
     
    161159
    162160/********************************************************************************************
    163  * This function move data between a remote mapper, identified by the <mapper_xp> argument,
    164  * and a localised remote kernel buffer. It can be called by a thread running any cluster.
      161 * This function moves <size> bytes from/to a remote mapper, identified by the <mapper_xp>
     162 * argument, to/from a remote kernel buffer, identified by the <buffer_xp> argument.
     163 * It can be called by a thread running in any cluster.
    165164 * If required, the data transfer is split in "fragments", where one fragment contains
    166  * contiguous bytes in the same mapper page.
    167  * It uses a "remote_memcpy" to move a fragment to/from the kernel buffer.
    168  * In case of write, the dirty bit is set for all pages written in the mapper.
     165 * contiguous bytes in the same mapper page. Each fragment uses a "remote_memcpy".
     166 * In case of write to mapper, the dirty bit is set for all pages written in the mapper.
    169167 *******************************************************************************************
    170168 * @ mapper_xp    : extended pointer on mapper.
     
    248246
    249247/*******************************************************************************************
    250  * This debug function displays the content of a given page of a given mapper.
    251  * - the mapper is identified by the <mapper_xp> argument.
    252  * - the page is identified by the <page_id> argument.
    253  * - the number of bytes to display in page is defined by the <nbytes> argument.
     248 * This debug function displays the content of a given page of a given mapper, identified
     249 * by the <mapper_xp> and <page_xp> arguments.
     250 * The number of bytes to display in page is defined by the <nbytes> argument.
     254251 * The format is eight (32 bits) words per line in hexadecimal.
    255252 * It can be called by any thread running in any cluster.
    256  * In case of miss in mapper, it load the missing page from device to mapper.
    257253 *******************************************************************************************
    258254 * @ mapper_xp  : [in]  extended pointer on the mapper.
    259  * @ page_id    : [in]  page index in file.
    260  * @ nbytes     : [in]  value to be written.
    261  * @ returns 0 if success / return -1 if error.
    262  ******************************************************************************************/
    263 error_t mapper_display_page( xptr_t     mapper_xp,
    264                              uint32_t   page_id,
    265                              uint32_t   nbytes );
     255 * @ page_xp    : [in]  extended pointer on page descriptor.
     256 * @ nbytes     : [in]  number of bytes in page.
      257 * This function does not return any value.
     258 ******************************************************************************************/
     259void mapper_display_page( xptr_t     mapper_xp,
     260                          xptr_t     page_xp,
     261                          uint32_t   nbytes );
    266262
    267263
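
    Caller-side sketch of the new mapper_display_page() interface: the page descriptor is
    obtained first (e.g. with mapper_remote_get_page(), as in mapper.c above), then passed
    explicitly:

        xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );
        if( page_xp != XPTR_NULL ) mapper_display_page( mapper_xp , page_xp , 128 );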
  • trunk/kernel/mm/page.h

    r635 r656  
    4949 * - The remote_busylock is used to allows any remote thread to atomically
    5050 *   test/modify the forks counter or the flags.
    51  * - The list entry is used to register the page in a free list or in dirty list.
    52  *   The refcount is used for page release to KMEM.
     51 * - The list field is used to register the page in a free list, or in dirty list,
     52 *   as a given page cannot be simultaneously dirty and free.
     53 * - The refcount is used to release the page to the PPM.
    5354 * NOTE: the size is 48 bytes for a 32 bits core.
    5455 ************************************************************************************/
  • trunk/kernel/mm/ppm.c

    r651 r656  
    151151        page_t   * buddy;               // searched buddy page descriptor
    152152        uint32_t   buddy_index;         // buddy page index in page_tbl[]
    153         page_t   * current;             // current (merged) page descriptor
     153        page_t   * current_ptr;         // current (merged) page descriptor
    154154        uint32_t   current_index;       // current (merged) page index in page_tbl[]
    155155        uint32_t   current_order;       // current (merged) page order
     
    168168
    169169    // initialise loop variables
    170     current       = page;
     170    current_ptr   = page;
    171171    current_order = page->order;
    172172        current_index = page - ppm->pages_tbl;
     
    191191                buddy->order = 0;
    192192
    193                 // compute next (merged) page index in page_tbl[]
     193                // compute next values for loop variables
    194194                current_index &= buddy_index;
    195 
    196         // compute next (merged) page order
    197195        current_order++;
    198 
    199         // compute next (merged) page descripror
    200         current = pages_tbl + current_index;
     196        current_ptr = pages_tbl + current_index;
    201197    }
    202198
    203199        // update order field for merged page descriptor
    204         current->order = current_order;
     200        current_ptr->order = current_order;
    205201
    206202        // insert merged page in relevant free list
    207         list_add_first( &ppm->free_pages_root[current_order] , &current->list );
     203        list_add_first( &ppm->free_pages_root[current_order] , &current_ptr->list );
    208204        ppm->free_pages_nr[current_order] ++;
    209205
    210206}  // end ppm_free_pages_nolock()
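
    A standalone sketch of the index arithmetic used by the merge loop above, assuming the
    usual buddy rule (the buddy of a block of order k differs only in bit k, which is not
    shown in this hunk); current_index &= buddy_index selects the lower of the two merged
    blocks:

        #include <stdint.h>
        #include <assert.h>

        int main( void )
        {
            uint32_t current_index = 44;                    /* example page index            */
            uint32_t current_order = 2;                     /* current block size = 4 pages  */

            uint32_t buddy_index = current_index ^ (1 << current_order);   /* = 40           */

            current_index &= buddy_index;    /* merged block starts at the lower index = 40  */
            current_order++;                 /* merged block is twice as large               */

            assert( current_index == 40 && current_order == 3 );
            return 0;
        }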
    211 
    212207
    213208////////////////////////////////////////////
     
    221216    thread_t * this = CURRENT_THREAD;
    222217
     218        ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     219
    223220#if DEBUG_PPM_ALLOC_PAGES
    224221uint32_t cycle = (uint32_t)hal_get_cycles();
    225222#endif
    226223
    227 #if (DEBUG_PPM_ALLOC_PAGES & 1)
     224#if DEBUG_PPM_ALLOC_PAGES
    228225if( DEBUG_PPM_ALLOC_PAGES < cycle )
    229226{
    230227    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    231228    __FUNCTION__, this->process->pid, this->trdid, 1<<order, local_cxy, cycle );
    232     ppm_remote_display( local_cxy );
    233 }
    234 #endif
    235 
    236         ppm_t    * ppm = &LOCAL_CLUSTER->ppm;
     229    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
     230}
     231#endif
    237232
    238233// check order
     
    316311    dqdt_increment_pages( local_cxy , order );
    317312
     313    hal_fence();
     314
    318315#if DEBUG_PPM_ALLOC_PAGES
    319316if( DEBUG_PPM_ALLOC_PAGES < cycle )
     
    322319    __FUNCTION__, this->process->pid, this->trdid,
    323320    1<<order, local_cxy, ppm_page2ppn(XPTR( local_cxy , found_block )), cycle );
    324     ppm_remote_display( local_cxy );
     321    if( DEBUG_PPM_ALLOC_PAGES & 1 ) ppm_remote_display( local_cxy );
    325322}
    326323#endif
     
    340337#endif
    341338
    342 #if ( DEBUG_PPM_FREE_PAGES & 1 )
     339#if DEBUG_PPM_FREE_PAGES
    343340if( DEBUG_PPM_FREE_PAGES < cycle )
    344341{
     
    346343    __FUNCTION__, this->process->pid, this->trdid,
    347344    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)), cycle );
    348     ppm_remote_display( local_cxy );
     345    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
     346}
    349347#endif
    350348
     
    362360    // update DQDT
    363361    dqdt_decrement_pages( local_cxy , page->order );
     362
     363    hal_fence();
    364364
    365365#if DEBUG_PPM_FREE_PAGES
     
    369369    __FUNCTION__, this->process->pid, this->trdid,
    370370    1<<page->order, local_cxy, ppm_page2ppn(XPTR(local_cxy , page)) , cycle );
    371     ppm_remote_display( local_cxy );
     371    if( DEBUG_PPM_FREE_PAGES & 1 ) ppm_remote_display( local_cxy );
    372372}
    373373#endif
     
    376376
    377377
    378 
    379 
    380378/////////////////////////////////////////////
    381 void * ppm_remote_alloc_pages( cxy_t     cxy,
     379xptr_t ppm_remote_alloc_pages( cxy_t     cxy,
    382380                               uint32_t  order )
    383381{
     
    389387    thread_t * this  = CURRENT_THREAD;
    390388
     389// check order
     390assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
     391
     392    // get local pointer on PPM (same in all clusters)
     393        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     394
    391395#if DEBUG_PPM_REMOTE_ALLOC_PAGES
    392396uint32_t   cycle = (uint32_t)hal_get_cycles();
    393397#endif
    394398
    395 #if ( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 )
     399#if DEBUG_PPM_REMOTE_ALLOC_PAGES
    396400if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
    397401{
    398     printk("\n[%s] thread[%x,%x] enter for %d small page(s) in cluster %x / cycle %d\n",
     402    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / cycle %d\n",
    399403    __FUNCTION__, this->process->pid, this->trdid, 1<<order, cxy, cycle );
    400     ppm_remote_display( cxy );
    401 }
    402 #endif
    403 
    404 // check order
    405 assert( (order < CONFIG_PPM_MAX_ORDER) , "illegal order argument = %d\n" , order );
    406 
    407     // get local pointer on PPM (same in all clusters)
    408         ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     404    if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     405}
     406#endif
    409407
    410408    //build extended pointer on lock protecting remote PPM
     
    489487    dqdt_increment_pages( cxy , order );
    490488
     489    hal_fence();
     490
    491491#if DEBUG_PPM_REMOTE_ALLOC_PAGES
    492492if( DEBUG_PPM_REMOTE_ALLOC_PAGES < cycle )
     
    495495    __FUNCTION__, this->process->pid, this->trdid,
    496496    1<<order, cxy, ppm_page2ppn(XPTR( cxy , found_block )), cycle );
    497     ppm_remote_display( cxy );
    498 }
    499 #endif
    500 
    501         return found_block;
     497    if( DEBUG_PPM_REMOTE_ALLOC_PAGES & 1 ) ppm_remote_display( cxy );
     498}
     499#endif
     500
     501        return XPTR( cxy , found_block );
    502502
    503503}  // end ppm_remote_alloc_pages()
     
    515515        uint32_t   current_order;    // current (merged) page order
    516516
     517    // get local pointer on PPM (same in all clusters)
     518        ppm_t * ppm = &LOCAL_CLUSTER->ppm;
     519
     520    // get page ppn and order
     521    uint32_t   order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
     522
    517523#if DEBUG_PPM_REMOTE_FREE_PAGES
    518524thread_t * this  = CURRENT_THREAD;
    519525uint32_t   cycle = (uint32_t)hal_get_cycles();
    520 #endif
    521 
    522 #if ( DEBUG_PPM_REMOTE_FREE_PAGES & 1 )
     526ppn_t      ppn   = ppm_page2ppn( XPTR( page_cxy , page_ptr ) );
     527#endif
     528
     529#if DEBUG_PPM_REMOTE_FREE_PAGES
    523530if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    524531{
    525532    printk("\n[%s] thread[%x,%x] enter for %d page(s) in cluster %x / ppn %x / cycle %d\n",
    526     __FUNCTION__, this->process->pid, this->trdid,
    527     1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr )), cycle );
    528     ppm_remote_display( page_cxy );
     533    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
     534    if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    529535}
    530536#endif
     
    533539    page_xp = XPTR( page_cxy , page_ptr );
    534540   
    535     // get local pointer on PPM (same in all clusters)
    536         ppm_t * ppm = &LOCAL_CLUSTER->ppm;
    537 
    538541    // build extended pointer on lock protecting remote PPM
    539542    xptr_t lock_xp = XPTR( page_cxy , &ppm->free_lock );
     
    556559    // initialise loop variables
    557560    current_ptr   = page_ptr;
    558     current_order = hal_remote_l32( XPTR( page_cxy , &page_ptr->order ) );
     561    current_order = order;
    559562        current_index = page_ptr - ppm->pages_tbl;
    560563
     
    582585        hal_remote_s32( XPTR( page_cxy , &buddy_ptr->order ) , 0 );
    583586
    584                 // compute next (merged) page index in page_tbl[]
     587                // compute next values for loop variables
    585588                current_index &= buddy_index;
    586 
    587         // compute next (merged) page order
    588589        current_order++;
    589 
    590         // compute next (merged) page descripror
    591590        current_ptr = pages_tbl + current_index;
    592591
     
    594593
    595594        // update current (merged) page descriptor order field
    596         current_ptr = pages_tbl + current_index;
    597595    hal_remote_s32( XPTR( page_cxy , &current_ptr->order ) , current_order );
    598596
    599597        // insert current (merged) page into relevant free list
    600         list_remote_add_first( page_cxy , &ppm->free_pages_root[current_order] , &current_ptr->list );
     598        list_remote_add_first( page_cxy, &ppm->free_pages_root[current_order], &current_ptr->list );
    601599    hal_remote_atomic_add( XPTR( page_cxy , &ppm->free_pages_nr[current_order] ) , 1 );
    602600
     
    607605    dqdt_decrement_pages( page_cxy , page_ptr->order );
    608606
     607    hal_fence();
     608
    609609#if DEBUG_PPM_REMOTE_FREE_PAGES
    610610if( DEBUG_PPM_REMOTE_FREE_PAGES < cycle )
    611611{
    612612    printk("\n[%s] thread[%x,%x] released %d page(s) in cluster %x / ppn %x / cycle %d\n",
    613     __FUNCTION__, this->process->pid, this->trdid,
    614     1<<page_ptr->order, page_cxy, ppm_page2ppn(XPTR( page_cxy , page_ptr ) ), cycle );
    615     ppm_remote_display( page_cxy );
     613    __FUNCTION__, this->process->pid, this->trdid, 1<<order, page_cxy, ppn, cycle );
     614    if( DEBUG_PPM_REMOTE_FREE_PAGES & 1 ) ppm_remote_display( page_cxy );
    616615}
    617616#endif
     
    658657        uint32_t n = hal_remote_l32( XPTR( cxy , &ppm->free_pages_nr[order] ) );
    659658
    660         // display direct free_list[order]
    661                 nolock_printk("- forward  : order = %d / n = %d\t: ", order , n );
     659        // display forward free_list[order]
     660                nolock_printk("- forward  : order = %d / n = %d : ", order , n );
    662661                LIST_REMOTE_FOREACH( cxy , &ppm->free_pages_root[order] , iter )
     662                {
     663            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
     664                        nolock_printk("%x," , ppm_page2ppn( page_xp ) );
     665                }
     666                nolock_printk("\n");
     667
     668        // display backward free_list[order]
     669                nolock_printk("- backward : order = %d / n = %d : ", order , n );
     670                LIST_REMOTE_FOREACH_BACKWARD( cxy , &ppm->free_pages_root[order] , iter )
    663671                {
    664672            page_xp = XPTR( cxy , LIST_ELEMENT( iter , page_t , list ) );
  • trunk/kernel/mm/ppm.h

    r635 r656  
    8484/*****************************************************************************************
    8585 * This local allocator must be called by a thread running in local cluster.
    86  * It allocates n contiguous physical 4 Kbytes pages from the local cluster, where
    87  * n is a power of 2 defined by the <order> argument.
     86 * It allocates N contiguous physical 4 Kbytes pages from the local cluster, where
     87 * N is a power of 2 defined by the <order> argument.
    8888 * In normal use, it should not be called directly, as the recommended way to allocate
    8989 * physical pages is to call the generic allocator defined in kmem.h.
     
    116116/*****************************************************************************************
    117117 * This remote  allocator can be called by any thread running in any cluster.
    118  * It allocates n contiguous physical 4 Kbytes pages from cluster identified
    119  * by the <cxy> argument, where n is a power of 2 defined by the <order> argument.
     118 * It allocates N contiguous physical 4 Kbytes pages from cluster identified
     119 * by the <cxy> argument, where N is a power of 2 defined by the <order> argument.
    120120 * In normal use, it should not be called directly, as the recommended way to allocate
    121121 * physical pages is to call the generic allocator defined in kmem.h.
     
    123123 * @ cxy       : remote cluster identifier.
    124124 * @ order     : ln2( number of 4 Kbytes pages)
    125  * @ returns a local pointer on remote page descriptor if success / XPTR_NULL if error.
    126  ****************************************************************************************/
    127 void *  ppm_remote_alloc_pages( cxy_t    cxy,
     125 * @ returns an extended pointer on page descriptor if success / XPTR_NULL if error.
     126 ****************************************************************************************/
     127xptr_t  ppm_remote_alloc_pages( cxy_t    cxy,
    128128                                uint32_t order );
    129129
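
    Caller-side sketch of the new xptr_t-based interface, mirroring the updated call sites
    in kcm.c, kmem.c and mapper.c:

        xptr_t page_xp = ppm_remote_alloc_pages( cxy , 0 );        // one 4 Kbytes page
        if( page_xp == XPTR_NULL ) return -1;                      // allocation failure

        xptr_t   base_xp  = ppm_page2base( page_xp );              // buffer base (remote)
        page_t * page_ptr = GET_PTR( page_xp );                    // local pointer on descriptor
        ppn_t    ppn      = ppm_page2ppn( page_xp );               // physical page number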
  • trunk/kernel/mm/vmm.c

    r651 r656  
    17451745
    17461746////////////////////////////////////////////////////////////////////////////////////////////
    1747 // This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions.
    1748 // Depending on the vseg <type>, it decrements the physical page refcount, and
    1749 // conditionnally release to the relevant kmem the physical page identified by <ppn>.
     1747// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions
     1748// to update the physical page descriptor identified by the <ppn> argument.
     1749// It decrements the refcount, set the dirty bit when required, and releases the physical
     1750// page to kmem depending on the vseg type.
     1751// - KERNEL : refcount decremented / not released to kmem    / dirty bit not set
     1752// - FILE   : refcount decremented / not released to kmem    / dirty bit set when required.
     1753// - CODE   : refcount decremented / released to kmem        / dirty bit not set.
      1754// - STACK  : refcount decremented / released to kmem        / dirty bit not set.
     1755// - DATA   : refcount decremented / released to kmem if ref / dirty bit not set.
     1756// - MMAP   : refcount decremented / released to kmem if ref / dirty bit not set.
    17501757////////////////////////////////////////////////////////////////////////////////////////////
    17511758// @ process  : local pointer on process.
    17521759// @ vseg     : local pointer on vseg.
     17531760// @ ppn      : released physical page index.
     1761// @ dirty    : set the dirty bit in page descriptor when non zero.
    17541762////////////////////////////////////////////////////////////////////////////////////////////
    17551763static void vmm_ppn_release( process_t * process,
    17561764                             vseg_t    * vseg,
    1757                              ppn_t       ppn )
     1765                             ppn_t       ppn,
     1766                             uint32_t    dirty )
    17581767{
    1759     bool_t do_release;
     1768    bool_t do_kmem_release;
    17601769
    17611770    // get vseg type
    17621771    vseg_type_t type = vseg->type;
    17631772
    1764     // compute is_ref
     1773    // compute is_ref : true when the local cluster is the process reference cluster
    17651774    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
    17661775
     
    17741783    hal_remote_atomic_add( count_xp , -1 );
    17751784
    1776     // compute the do_release condition depending on vseg type
    1777     if( (type == VSEG_TYPE_FILE)  ||
    1778         (type == VSEG_TYPE_KCODE) ||
     1785    // compute the do_kmem_release condition depending on vseg type
     1786    if( (type == VSEG_TYPE_KCODE) ||
    17791787        (type == VSEG_TYPE_KDATA) ||
    17801788        (type == VSEG_TYPE_KDEV) )           
    17811789    {
    1782         // no physical page release for FILE and KERNEL
    1783         do_release = false;
    1784     }
     1790        // no physical page release for KERNEL
     1791        do_kmem_release = false;
     1792    }
     1793    else if( type == VSEG_TYPE_FILE )
     1794    {
     1795        // no physical page release for FILE
     1796        do_kmem_release = false;
     1797
     1798        // set dirty bit if required
     1799        if( dirty ) ppm_page_do_dirty( page_xp );
     1800    }   
    17851801    else if( (type == VSEG_TYPE_CODE)  ||
    17861802             (type == VSEG_TYPE_STACK) )
    17871803    {
    17881804        // always release physical page for private vsegs
    1789         do_release = true;
     1805        do_kmem_release = true;
    17901806    }
    17911807    else if( (type == VSEG_TYPE_ANON)  ||
     
    17931809    {
    17941810        // release physical page if reference cluster
    1795         do_release = is_ref;
     1811        do_kmem_release = is_ref;
    17961812    }
    17971813    else if( is_ref )  // vseg_type == DATA in reference cluster
     
    18141830
    18151831        // release physical page if forks == 0
    1816         do_release = (forks == 0);
     1832        do_kmem_release = (forks == 0);
    18171833    }
    18181834    else              // vseg_type == DATA not in reference cluster
    18191835    {
    18201836        // no physical page release if not in reference cluster
    1821         do_release = false;
     1837        do_kmem_release = false;
    18221838    }
    18231839
    18241840    // release physical page to relevant kmem when required
    1825     if( do_release )
    1826     {
    1827         ppm_remote_free_pages( page_cxy , page_ptr );
     1841    if( do_kmem_release )
     1842    {
     1843        kmem_req_t req;
     1844        req.type = KMEM_PPM;
     1845        req.ptr  = GET_PTR( ppm_ppn2base( ppn ) );
     1846
     1847        kmem_remote_free( page_cxy , &req );
    18281848
    18291849#if DEBUG_VMM_PPN_RELEASE
     
    18921912            hal_gpt_reset_pte( gpt_xp , vpn );
    18931913
    1894             // release physical page when required
    1895             vmm_ppn_release( process , vseg , ppn );
     1914            // release physical page depending on vseg type
     1915            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    18961916        }
    18971917    }
     
    19862006
    19872007            // release physical page when required
    1988             vmm_ppn_release( process , vseg , ppn );
     2008            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    19892009        }
    19902010    }
     
    20082028
    20092029            // release physical page when required
    2010             vmm_ppn_release( process , vseg , ppn );
     2030            vmm_ppn_release( process , vseg , ppn , attr & GPT_DIRTY );
    20112031        }
    20122032    }
     
    21702190// @ vseg   : local pointer on vseg.
    21712191// @ vpn    : unmapped vpn.
    2172 // @ return an extended pointer on the allocated page
     2192// @ return an extended pointer on the allocated page descriptor.
    21732193//////////////////////////////////////////////////////////////////////////////////////
    21742194static xptr_t vmm_page_allocate( vseg_t * vseg,
     
    21862206    xptr_t       page_xp;
    21872207    cxy_t        page_cxy;
    2188     page_t     * page_ptr;
    21892208    uint32_t     index;
    21902209
     
    21972216assert( ( type != VSEG_TYPE_FILE ) , "illegal vseg type\n" );
    21982217
     2218    // compute target cluster identifier
    21992219    if( flags & VSEG_DISTRIB )    // distributed => cxy depends on vpn LSB
    22002220    {
     
    22142234
    22152235    // allocate one small physical page from target cluster
    2216     page_ptr = ppm_remote_alloc_pages( page_cxy , 0 );
    2217 
    2218     page_xp = XPTR( page_cxy , page_ptr );
     2236    kmem_req_t req;
     2237    req.type  = KMEM_PPM;
     2238    req.order = 0;
     2239    req.flags = AF_ZERO;
     2240
     2241    // get local pointer on page base
     2242    void * ptr = kmem_remote_alloc( page_cxy , &req );
     2243
     2244    // get extended pointer on page descriptor
     2245    page_xp = ppm_base2page( XPTR( page_cxy , ptr ) );
    22192246
    22202247#if DEBUG_VMM_PAGE_ALLOCATE
     
    22452272uint32_t   cycle = (uint32_t)hal_get_cycles();
    22462273thread_t * this  = CURRENT_THREAD;
    2247 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
    2248 printk("\n[%s] thread[%x,%x] enter for vpn %x / type %s / page_id  %d / cycle %d\n",
     2274if( DEBUG_VMM_GET_ONE_PPN < cycle )
     2275printk("\n[%s] thread[%x,%x] enter for vpn %x / vseg %s / page_id  %d / cycle %d\n",
    22492276__FUNCTION__, this->process->pid, this->trdid, vpn, vseg_type_str(type), page_id, cycle );
     2277#endif
     2278
     2279#if (DEBUG_VMM_GET_ONE_PPN & 2)
     2280if( DEBUG_VMM_GET_ONE_PPN < cycle )
     2281hal_vmm_display( XPTR( local_cxy , this->process ) , true );
    22502282#endif
    22512283
     
    22912323
    22922324#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2293 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2325if( DEBUG_VMM_GET_ONE_PPN < cycle )
    22942326printk("\n[%s] thread[%x,%x] for vpn = %x / elf_offset = %x\n",
    22952327__FUNCTION__, this->process->pid, this->trdid, vpn, elf_offset );
     
    23052337
    23062338#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2307 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2339if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23082340printk("\n[%s] thread[%x,%x] for vpn  %x / fully in BSS\n",
    23092341__FUNCTION__, this->process->pid, this->trdid, vpn );
     
    23222354
    23232355#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2324 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2356if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23252357printk("\n[%s] thread[%x,%x] for vpn  %x / fully in mapper\n",
    23262358__FUNCTION__, this->process->pid, this->trdid, vpn );
     
    23392371
    23402372#if (DEBUG_VMM_GET_ONE_PPN & 0x1)
    2341 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2373if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23422374printk("\n[%s] thread[%x,%x] for vpn  %x / both mapper & BSS\n"
    23432375"      %d bytes from mapper / %d bytes from BSS\n",
     
    23652397                }
    23662398            }   
    2367         }  // end initialisation for CODE or DATA types   
     2399
     2400        }  // end if CODE or DATA types   
    23682401    }
    23692402
     
    23722405
    23732406#if DEBUG_VMM_GET_ONE_PPN
    2374 cycle = (uint32_t)hal_get_cycles();
    2375 if( (DEBUG_VMM_GET_ONE_PPN < cycle) && (vpn == 0x40b) )
     2407if( DEBUG_VMM_GET_ONE_PPN < cycle )
    23762408printk("\n[%s] thread[%x,%x] exit for vpn %x / ppn %x / cycle %d\n",
    23772409__FUNCTION__ , this->process->pid, this->trdid , vpn , *ppn, cycle );
     2410#endif
     2411
     2412#if (DEBUG_VMM_GET_ONE_PPN & 2)
     2413if( DEBUG_VMM_GET_ONE_PPN < cycle )
     2414hal_vmm_display( XPTR( local_cxy , this->process ) , true );
    23782415#endif
    23792416
     
    24042441
    24052442#if DEBUG_VMM_HANDLE_PAGE_FAULT
    2406 if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2443if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
    24072444printk("\n[%s] thread[%x,%x] enter for vpn %x / cycle %d\n",
    24082445__FUNCTION__, this->process->pid, this->trdid, vpn, start_cycle );
    24092446#endif
    24102447
    2411 #if (DEBUG_VMM_HANDLE_PAGE_FAULT & 1)
     2448#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
    24122449if( (start_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
    2413 hal_vmm_display( this->process , true );
     2450hal_vmm_display( XPTR( local_cxy , this->process ) , true );
    24142451#endif
    24152452
     
    25042541#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    25052542uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2506 uint32_t cost      = end_cycle - start_cycle;
    25072543#endif
    25082544
     
    25132549#endif
    25142550
     2551#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
     2552if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2553hal_vmm_display( XPTR( local_cxy , this->process ) , true );
     2554#endif
     2555
    25152556#if CONFIG_INSTRUMENTATION_PGFAULTS
     2557uint32_t cost      = end_cycle - start_cycle;
    25162558this->info.local_pgfault_nr++;
    25172559this->info.local_pgfault_cost += cost;
     
    25842626#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    25852627uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2586 uint32_t cost      = end_cycle - start_cycle;
    25872628#endif
    25882629
     
    25932634#endif
    25942635
     2636#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
     2637if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2638hal_vmm_display( XPTR( local_cxy , this->process ) , true );
     2639#endif
     2640
    25952641#if CONFIG_INSTRUMENTATION_PGFAULTS
     2642uint32_t cost      = end_cycle - start_cycle;
    25962643this->info.false_pgfault_nr++;
    25972644this->info.false_pgfault_cost += cost;
     
    26512698#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    26522699uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2653 uint32_t cost      = end_cycle - start_cycle;
    26542700#endif
    26552701
     
    26602706#endif
    26612707
     2708#if (DEBUG_VMM_HANDLE_PAGE_FAULT & 2)
     2709if( (end_cycle > DEBUG_VMM_HANDLE_PAGE_FAULT) && (vpn > 0) )
     2710hal_vmm_display( XPTR( local_cxy , this->process ) , true );
     2711#endif
     2712
    26622713#if CONFIG_INSTRUMENTATION_PGFAULTS
     2714uint32_t cost      = end_cycle - start_cycle;
    26632715this->info.global_pgfault_nr++;
    26642716this->info.global_pgfault_cost += cost;
     
    26762728#if (CONFIG_INSTRUMENTATION_PGFAULTS || DEBUG_VMM_HANDLE_PAGE_FAULT)
    26772729uint32_t end_cycle = (uint32_t)hal_get_cycles();
    2678 uint32_t cost      = end_cycle - start_cycle;
    26792730#endif
    26802731
     
    26862737
    26872738#if CONFIG_INSTRUMENTATION_PGFAULTS
     2739uint32_t cost      = end_cycle - start_cycle;
    26882740this->info.false_pgfault_nr++;
    26892741this->info.false_pgfault_cost += cost;
     
    27202772#endif
    27212773
    2722 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3 )
     2774#if (DEBUG_VMM_HANDLE_COW & 2)
    27232775hal_vmm_display( XPTR( local_cxy , process ) , true );
    27242776#endif
     
    29022954#endif
    29032955
    2904 #if ((DEBUG_VMM_HANDLE_COW & 3) == 3)
     2956#if (DEBUG_VMM_HANDLE_COW & 2)
    29052957hal_vmm_display( XPTR( local_cxy , process ) , true );
    29062958#endif
  • trunk/kernel/mm/vmm.h

    r651 r656  
    312312
    313313/*********************************************************************************************
    314  * This function removes from the VMM of a process descriptor identified by the <process>
    315  * argument the vseg identified by the <vseg> argument. 
    316  * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions.
     314 * This function removes from the VMM of a local process descriptor, identified by
     315 * the <process> argument, the vseg identified by the <vseg> argument. 
     316 * It is called by the vmm_user_reset(), vmm_global_delete_vseg(), and vmm_destroy() functions.
    317317 * It must be called by a local thread, running in the cluster containing the modified VMM.
    318318 * Use the RPC_VMM_REMOVE_VSEG if required.
     
    324324 *   . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
    325325 *   . for STACK the vseg is released to the local stack allocator.
    326  *   . for all other types, the vseg is released to the local kmem.
     326 *   . for all other types, the vseg descriptor is released to the local kmem.
    327327 * Regarding the physical pages release:
    328328 *   . for KERNEL and FILE, the pages are not released to kmem.
    329  *   . for CODE and STACK, the pages are released to local kmem when they are not COW.
     329 *   . for CODE and STACK, the pages are released to local kmem.
    330330 *   . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
    331331 *     the local cluster is the reference cluster.
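The physical page policy listed above can be condensed into a single decision function. The sketch below only restates the vmm_ppn_release() branches shown earlier in this changeset; the helper name is hypothetical, and the VSEG_TYPE_DATA / VSEG_TYPE_REMOTE enumerators are inferred from the DATA and REMOTE types named in the comments.

// Illustrative condensation of the page release policy: returns true when the
// physical page must be returned to kmem (is_ref means the local cluster is
// the process reference cluster, forks is the pending forks counter).
static bool_t must_release_to_kmem( vseg_type_t type,
                                    bool_t      is_ref,
                                    uint32_t    forks )
{
    switch( type )
    {
        case VSEG_TYPE_KCODE:                      // KERNEL vsegs : never released
        case VSEG_TYPE_KDATA:
        case VSEG_TYPE_KDEV:
        case VSEG_TYPE_FILE:   return false;       // FILE pages stay in the mapper
        case VSEG_TYPE_CODE:                       // private vsegs : always released
        case VSEG_TYPE_STACK:  return true;
        case VSEG_TYPE_ANON:                       // shared vsegs : released only
        case VSEG_TYPE_REMOTE: return is_ref;      // in the reference cluster
        case VSEG_TYPE_DATA:   return is_ref && (forks == 0);
        default:               return false;
    }
}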
  • trunk/kernel/syscalls/sys_display.c

    r640 r656  
    300300            xptr_t        mapper_xp;
    301301            mapper_t    * mapper_ptr;
     302            xptr_t        page_xp;
    302303
    303304            char          kbuf[CONFIG_VFS_MAX_PATH_LENGTH];
     
    315316 __FUNCTION__ );
    316317#endif
    317                 this->errno = ENFILE;
    318                 return -1;
    319             }
    320 
     318                this->errno = EINVAL;
     319                return -1;
     320            }
     321
     322            // check nbytes
     323            if( nbytes >= 4096 )
     324            {
     325
     326#if DEBUG_SYSCALLS_ERROR
     327printk("\n[ERROR] in %s for MAPPER : nbytes must be smaller than 4096\n",
     328 __FUNCTION__ );
     329#endif
     330                this->errno = EINVAL;
     331                return -1;
     332            }
     333           
    321334            // copy pathname in kernel space
    322335            hal_strcpy_from_uspace( XPTR( local_cxy , kbuf ),
     
    366379            mapper_xp  = XPTR( inode_cxy , mapper_ptr );
    367380
    368             // display mapper
    369             error = mapper_display_page( mapper_xp , page_id , nbytes );
    370 
    371             if( error )
     381            // get extended pointer on target page
     382            page_xp = mapper_remote_get_page( mapper_xp , page_id );
     383
     384            if( page_xp == XPTR_NULL )
    372385                {
    373386
    374387#if DEBUG_SYSCALLS_ERROR
    375 printk("\n[ERROR] in %s for MAPPER : cannot display page %d\n",
     388printk("\n[ERROR] in %s for MAPPER : cannot get page %d\n",
    376389__FUNCTION__ , page_id );
    377390#endif
     
    379392                        return -1;
    380393                }
     394
     395            // display mapper
     396            mapper_display_page( mapper_xp , page_xp , nbytes );
     397
    381398
    382399            break;
     
    463480                uint32_t page = (uint32_t)arg0;
    464481
    465                 fatfs_display_fat( page , entries );
     482                fatfs_display_fat( page , 0 , entries );
    466483            }
    467484
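With this change sys_display() fetches the mapper page itself and passes it to mapper_display_page(), which now takes a page rather than a page index. A short sketch of that sequence, with a hypothetical helper name and illustrative error handling:

// Hypothetical helper condensing the new display sequence used above.
static error_t display_mapper_page( xptr_t   mapper_xp,
                                    uint32_t page_id,
                                    uint32_t nbytes )
{
    // get extended pointer on the target page descriptor
    xptr_t page_xp = mapper_remote_get_page( mapper_xp , page_id );

    if( page_xp == XPTR_NULL ) return -1;      // page cannot be obtained

    // display at most nbytes (must be smaller than 4096) from this page
    mapper_display_page( mapper_xp , page_xp , nbytes );

    return 0;
}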
  • trunk/kernel/syscalls/sys_read.c

    r635 r656  
    6363    cxy_t         file_cxy;        // remote file cluster identifier
    6464    uint32_t      file_type;       // file type
    65     uint32_t      file_attr;       // file_attribute
     65    uint32_t      file_offset;     // file offset
     66    uint32_t      file_attr;       // file attributes
     67    vfs_inode_t * inode_ptr;       // local pointer on file inode
    6668    uint32_t      nbytes;          // number of bytes actually read
    6769    reg_t         save_sr;         // required to enable IRQs during syscall
     
    129131    file_cxy = GET_CXY( file_xp );
    130132
    131     // get file type and attributes
    132     file_type   = hal_remote_l32( XPTR( file_cxy , &file_ptr->type ) );
    133     file_attr   = hal_remote_l32( XPTR( file_cxy , &file_ptr->attr ) );
     133    // get inode, file type, offset and attributes
     134    inode_ptr   = hal_remote_lpt( XPTR( file_cxy , &file_ptr->inode  ) );
     135    file_type   = hal_remote_l32( XPTR( file_cxy , &file_ptr->type   ) );
     136    file_offset = hal_remote_l32( XPTR( file_cxy , &file_ptr->offset ) );
     137    file_attr   = hal_remote_l32( XPTR( file_cxy , &file_ptr->attr   ) );
    134138
    135139    // enable IRQs
    136140    hal_enable_irq( &save_sr );
    137141
    138     // action depend on file type
     142    // action depends on file type:
     143
    139144    if( file_type == INODE_TYPE_FILE )      // read from file mapper
    140145    {
     
    152157            }
    153158
    154         // move count bytes from mapper
     159        // try to move count bytes from mapper
    155160        nbytes = vfs_user_move( true,               // from mapper to buffer
    156161                                file_xp,
    157162                                vaddr,
    158163                                count );
    159         if( nbytes != count )
    160         {
    161 
    162 #if DEBUG_SYSCALLS_ERROR
    163 printk("\n[ERROR] in %s : thread[%x,‰x] cannot read %d bytes from file %d\n",
    164 __FUNCTION__, process->pid, this->trdid, count, file_id );
    165 #endif
    166             this->errno = EIO;
    167             hal_restore_irq( save_sr );
    168             return -1;
    169         }
    170164    }
    171165    else if( file_type == INODE_TYPE_DEV )  // read from TXT device
     
    184178            txt_owner_xp  = hal_remote_l64( XPTR( chdev_cxy , &chdev_ptr->ext.txt.owner_xp ) );
    185179
    186             // check TXT_RX ownership
     180            // wait for TXT_RX ownership
    187181            if ( process_owner_xp != txt_owner_xp )
    188182            {
     
    202196        }
    203197
    204         // move count bytes from device
     198        // try to move count bytes from TXT device
    205199        nbytes = devfs_user_move( true,             // from device to buffer
    206200                                  file_xp,
    207201                                  vaddr,
    208202                                  count );
    209         if( nbytes != count )
    210         {
    211 
    212 #if DEBUG_SYSCALLS_ERROR
    213 printk("\n[ERROR] in %s : thread[%x,‰x] cannot read data from file %d\n",
    214 __FUNCTION__, process->pid, this->trdid, file_id );
    215 #endif
    216             this->errno = EIO;
    217             hal_restore_irq( save_sr );
    218             return -1;
    219         }
    220203    }
    221204    else    // not FILE and not DEV
     
    229212        hal_restore_irq( save_sr );
    230213                return -1;
     214    }
     215
     216    // check error
     217    if( nbytes == 0xFFFFFFFF )
     218    {
     219
     220#if DEBUG_SYSCALLS_ERROR
     221printk("\n[ERROR] in %s : thread[%x,%x] cannot read data from file %d\n",
     222__FUNCTION__, process->pid, this->trdid, file_id );
     223#endif
     224        this->errno = EIO;
     225        hal_restore_irq( save_sr );
     226        return -1;
    231227    }
    232228
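The sys_read() hunk above (and the sys_write() hunk below) replace the old "nbytes != count" test by a single check against the 0xFFFFFFFF error marker, so a short transfer (for instance at end of file) is no longer reported as an error. A minimal sketch of the new convention; the wrapper name and parameter types are illustrative, while the calls and the error value come from the hunks.

// Hypothetical wrapper: move up to <count> bytes from the file mapper to the
// user buffer, following the new error convention of vfs_user_move().
static int read_from_mapper( thread_t * this,
                             xptr_t     file_xp,
                             void     * vaddr,
                             uint32_t   count )
{
    uint32_t nbytes = vfs_user_move( true,      // from mapper to user buffer
                                     file_xp,
                                     vaddr,
                                     count );

    if( nbytes == 0xFFFFFFFF )                  // hard I/O error
    {
        this->errno = EIO;
        return -1;
    }

    return (int)nbytes;                         // may be smaller than count
}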
  • trunk/kernel/syscalls/sys_write.c

    r635 r656  
    6262    cxy_t         file_cxy;        // remote file cluster identifier
    6363    uint32_t      file_type;       // file type
    64     uint32_t      file_offset;     // current file offset
    65     uint32_t      file_attr;       // file_attribute
     64    uint32_t      file_offset;     // file offset
     65    uint32_t      file_attr;       // file attributes
    6666    vfs_inode_t * inode_ptr;       // local pointer on associated inode
    6767    uint32_t      nbytes;          // number of bytes actually written
     
    138138    hal_enable_irq( &save_sr );
    139139
    140     // action depend on file type
     140    // action depends on file type:
     141
    141142    if( file_type == INODE_TYPE_FILE )  // write to a file mapper
    142143    {
     
    159160                                vaddr,
    160161                                count );
    161         if ( nbytes != count )
    162         {
    163 
    164 #if DEBUG_SYSCALLS_ERROR
    165 printk("\n[ERROR] in %s : thread[%x,%x] cannot write %d bytes into file %d\n",
    166 __FUNCTION__ , process->pid, this->trdid, count, file_id );
    167 #endif
    168             hal_restore_irq( save_sr );
    169             this->errno = EIO;
    170             return -1;
    171 
    172         }
    173 
    174         // update file size in inode descriptor
    175         // only if (file_offset + count) > current_size
    176         // note: the parent directory entry in mapper will
    177         // be updated by the close syscall     
    178         xptr_t inode_xp = XPTR( file_cxy , inode_ptr );
    179         vfs_inode_update_size( inode_xp , file_offset + count );
    180162    }
    181163    else if( file_type == INODE_TYPE_DEV )  // write to TXT device
     
    186168                                  vaddr,
    187169                                  count );
    188         if( nbytes != count )
    189         {
     170    }
     171    else  // not FILE and not DEV
     172    {
     173
     174#if DEBUG_SYSCALLS_ERROR
     175printk("\n[ERROR] in %s : thread[%x,%x] / illegal inode type %s\n",
     176__FUNCTION__, process->pid, this->trdid, vfs_inode_type_str( file_type ) );
     177#endif
     178        hal_restore_irq( save_sr );
     179                this->errno = EBADFD;
     180                return -1;
     181    }
     182
     183    // check error
     184    if( nbytes == 0xFFFFFFFF )
     185    {
    190186
    191187#if DEBUG_SYSCALLS_ERROR
     
    193189__FUNCTION__ , process->pid, this->trdid, file_id );
    194190#endif
    195             hal_restore_irq( save_sr );
    196             this->errno = EIO;
    197             return -1;
    198         }
    199     }
    200     else  // not FILE and not DEV
    201     {
    202 
    203 #if DEBUG_SYSCALLS_ERROR
    204 printk("\n[ERROR] in %s : thread[%x,%x] / illegal inode type %\n",
    205 __FUNCTION__, vfs_inode_type_str( file_type ) );
    206 #endif
    207191        hal_restore_irq( save_sr );
    208                 this->errno = EBADFD;
    209                 return -1;
     192        this->errno = EIO;
     193        return -1;
    210194    }
    211195