Changeset 640 for trunk/kernel


Timestamp: Oct 1, 2019, 1:19:00 PM (5 years ago)
Author: alain
Message: Remove all RPCs in page-fault handling.

Location: trunk/kernel
Files: 15 edited

  • trunk/kernel/kern/kernel_init.c

    r637 r640  
    163163    "PROCESS_FDARRAY",       // 27
    164164    "PROCESS_DIR",           // 28
    165     "unused_29",             // 29
     165    "VMM_VSL",               // 29
    166166
    167167    "PROCESS_THTBL",         // 30
     
    170170    "VFS_SIZE",              // 32
    171171    "VFS_FILE",              // 33
    172     "VMM_VSL",               // 34
    173     "VFS_MAIN",              // 35
    174     "FATFS_FAT",             // 36
     172    "VFS_MAIN",              // 34
     173    "FATFS_FAT",             // 35
    175174};       
    176175
     
    14181417#endif
    14191418
    1420 #if (DEBUG_KERNEL_INIT & 1)
    1421 if( (core_lid ==  0) & (local_cxy == 0) )
    1422 sched_display( 0 );
    1423 #endif
    1424 
    14251419    if( (core_lid == 0) && (local_cxy == 0) )
    14261420    {
     
    14451439                   " - khm manager        : %d bytes\n"
    14461440                   " - vmm manager        : %d bytes\n"
    1447                    " - gpt root           : %d bytes\n"
    14481441                   " - vfs inode          : %d bytes\n"
    14491442                   " - vfs dentry         : %d bytes\n"
     
    14731466                   sizeof( khm_t              ),
    14741467                   sizeof( vmm_t              ),
    1475                    sizeof( gpt_t              ),
    14761468                   sizeof( vfs_inode_t        ),
    14771469                   sizeof( vfs_dentry_t       ),
  • trunk/kernel/kern/rpc.c

    r637 r640  
    7373    &rpc_vfs_inode_load_all_pages_server,  // 19
    7474
    75     &rpc_vmm_get_vseg_server,              // 20
    76     &rpc_vmm_global_update_pte_server,     // 21
     75    &rpc_undefined,                        // 20
     76    &rpc_undefined,                        // 21
    7777    &rpc_undefined,                        // 22
    7878    &rpc_undefined,                        // 23
    7979    &rpc_mapper_sync_server,               // 24
    80     &rpc_undefined,                        // 25
    81     &rpc_vmm_delete_vseg_server,           // 26
     80    &rpc_vmm_resize_vseg_server,           // 25
     81    &rpc_vmm_remove_vseg_server,           // 26
    8282    &rpc_vmm_create_vseg_server,           // 27
    8383    &rpc_vmm_set_cow_server,               // 28
     
    109109    "VFS_INODE_LOAD_ALL_PAGES",  // 19
    110110
    111     "GET_VSEG",                  // 20
    112     "GLOBAL_UPDATE_PTE",         // 21
     111    "VMM_GLOBAL_RESIZE_VSEG",    // 20
     112    "VMM_GLOBAL_UPDATE_PTE",     // 21
    113113    "undefined_22",              // 22
    114114    "undefined_23",              // 23
    115115    "MAPPER_SYNC",               // 24
    116116    "undefined_25",              // 25
    117     "VMM_DELETE_VSEG",           // 26
     117    "VMM_REMOVE_VSEG",           // 26
    118118    "VMM_CREATE_VSEG",           // 27
    119119    "VMM_SET_COW",               // 28
     
    10721072
    10731073/////////////////////////////////////////////////////////////////////////////////////////
    1074 // [8]   Marshaling functions attached to RPC_VRS_FS_UPDATE_DENTRY
     1074// [8]   Marshaling functions attached to RPC_VFS_FS_UPDATE_DENTRY
    10751075/////////////////////////////////////////////////////////////////////////////////////////
    10761076
     
    20592059
    20602060/////////////////////////////////////////////////////////////////////////////////////////
    2061 // [20]          Marshaling functions attached to RPC_VMM_GET_VSEG
    2062 /////////////////////////////////////////////////////////////////////////////////////////
    2063 
     2061// [20]          RPC_VMM_GET_VSEG deprecated [AG] sept 2019
     2062/////////////////////////////////////////////////////////////////////////////////////////
     2063
     2064/*
    20642065//////////////////////////////////////////////////
    20652066void rpc_vmm_get_vseg_client( cxy_t       cxy,     
     
    21442145#endif
    21452146}
    2146 
    2147 
    2148 /////////////////////////////////////////////////////////////////////////////////////////
    2149 // [21]    Marshaling functions attached to RPC_VMM_GLOBAL_UPDATE_PTE
    2150 /////////////////////////////////////////////////////////////////////////////////////////
    2151 
    2152 ///////////////////////////////////////////////////////
    2153 void rpc_vmm_global_update_pte_client( cxy_t       cxy,   
    2154                                        process_t * process,  // in
    2155                                        vpn_t       vpn,      // in
    2156                                        uint32_t    attr,     // in
    2157                                        ppn_t       ppn )     // in
    2158 {
    2159 #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
    2160 thread_t * this = CURRENT_THREAD;
    2161 uint32_t cycle = (uint32_t)hal_get_cycles();
    2162 if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    2163 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
    2164 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2165 #endif
    2166 
    2167     uint32_t responses = 1;
    2168 
    2169     // initialise RPC descriptor header
    2170     rpc_desc_t  rpc;
    2171     rpc.index    = RPC_VMM_GLOBAL_UPDATE_PTE;
    2172     rpc.blocking = true;
    2173     rpc.rsp      = &responses;
    2174 
    2175     // set input arguments in RPC descriptor
    2176     rpc.args[0] = (uint64_t)(intptr_t)process;
    2177     rpc.args[1] = (uint64_t)vpn;
    2178     rpc.args[2] = (uint64_t)attr;
    2179     rpc.args[3] = (uint64_t)ppn;
    2180 
    2181     // register RPC request in remote RPC fifo
    2182     rpc_send( cxy , &rpc );
    2183 
    2184 #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
    2185 cycle = (uint32_t)hal_get_cycles();
    2186 if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    2187 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
    2188 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2189 #endif
    2190 }
    2191 
    2192 //////////////////////////////////////////////////
    2193 void rpc_vmm_global_update_pte_server( xptr_t xp )
    2194 {
    2195 #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
    2196 thread_t * this = CURRENT_THREAD;
    2197 uint32_t cycle = (uint32_t)hal_get_cycles();
    2198 if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    2199 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
    2200 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2201 #endif
    2202 
    2203     process_t   * process;
    2204     vpn_t         vpn;
    2205     uint32_t      attr;
    2206     ppn_t         ppn;
    2207 
    2208     // get client cluster identifier and pointer on RPC descriptor
    2209     cxy_t        client_cxy  = GET_CXY( xp );
    2210     rpc_desc_t * desc        = GET_PTR( xp );
    2211 
    2212     // get input argument "process" & "vpn" from client RPC descriptor
    2213     process = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    2214     vpn     = (vpn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    2215     attr    = (uint32_t)             hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
    2216     ppn     = (ppn_t)                hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
    2217    
    2218     // call local kernel function
    2219     vmm_global_update_pte( process , vpn , attr , ppn );
    2220 
    2221 #if DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE
    2222 cycle = (uint32_t)hal_get_cycles();
    2223 if( cycle > DEBUG_RPC_VMM_GLOBAL_UPDATE_PTE )
    2224 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
    2225 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2226 #endif
    2227 }
    2228 
    2229 /////////////////////////////////////////////////////////////////////////////////////////
    2230 // [22]          Marshaling functions attached to RPC_KCM_ALLOC
     2147*/
     2148
     2149/////////////////////////////////////////////////////////////////////////////////////////
     2150// [21]    undefined
     2151/////////////////////////////////////////////////////////////////////////////////////////
     2152
     2153/////////////////////////////////////////////////////////////////////////////////////////
     2154// [22]          RPC_KCM_ALLOC deprecated [AG] sept 2019
    22312155/////////////////////////////////////////////////////////////////////////////////////////
    22322156
     
    23082232
    23092233/////////////////////////////////////////////////////////////////////////////////////////
    2310 // [23]          Marshaling functions attached to RPC_KCM_FREE
     2234// [23]          RPC_KCM_FREE deprecated [AG] sept 2019
    23112235/////////////////////////////////////////////////////////////////////////////////////////
    23122236
     
    24602384
    24612385/////////////////////////////////////////////////////////////////////////////////////////
    2462 // [25]          Marshaling functions attached to RPC_MAPPER_HANDLE_MISS
    2463 /////////////////////////////////////////////////////////////////////////////////////////
    2464 
    2465 /*
     2386// [25]          Marshaling functions attached to RPC_VMM_RESIZE_VSEG
     2387/////////////////////////////////////////////////////////////////////////////////////////
     2388
    24662389//////////////////////////////////////////////////////////
    2467 void rpc_mapper_handle_miss_client( cxy_t             cxy,
    2468                                     struct mapper_s * mapper,
    2469                                     uint32_t          page_id,
    2470                                     xptr_t          * page_xp,
    2471                                     error_t         * error )
    2472 {
    2473 #if DEBUG_RPC_MAPPER_HANDLE_MISS
    2474 thread_t * this = CURRENT_THREAD;
    2475 uint32_t cycle = (uint32_t)hal_get_cycles();
    2476 if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
     2390void rpc_vmm_resize_vseg_client( cxy_t             cxy,
     2391                                 struct process_s * process,
     2392                                 struct vseg_s    * vseg,
     2393                                 intptr_t           new_base,
     2394                                 intptr_t           new_size )
     2395{
     2396#if DEBUG_RPC_VMM_RESIZE_VSEG
     2397thread_t * this = CURRENT_THREAD;
     2398uint32_t cycle = (uint32_t)hal_get_cycles();
     2399if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
    24772400printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
    24782401__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     
    24832406    // initialise RPC descriptor header
    24842407    rpc_desc_t  rpc;
    2485     rpc.index    = RPC_MAPPER_HANDLE_MISS;
     2408    rpc.index    = RPC_VMM_RESIZE_VSEG;
    24862409    rpc.blocking = true;
    24872410    rpc.rsp      = &responses;
    24882411
    24892412    // set input arguments in RPC descriptor
    2490     rpc.args[0] = (uint64_t)(intptr_t)mapper;
    2491     rpc.args[1] = (uint64_t)page_id;
     2413    rpc.args[0] = (uint64_t)(intptr_t)process;
     2414    rpc.args[1] = (uint64_t)(intptr_t)vseg;
     2415    rpc.args[2] = (uint64_t)new_base;
     2416    rpc.args[3] = (uint64_t)new_size;
    24922417
    24932418    // register RPC request in remote RPC fifo
    24942419    rpc_send( cxy , &rpc );
    24952420
    2496     // get output values from RPC descriptor
    2497     *page_xp = (xptr_t)rpc.args[2];
    2498     *error   = (error_t)rpc.args[3];
    2499 
    2500 #if DEBUG_RPC_MAPPER_HANDLE_MISS
    2501 cycle = (uint32_t)hal_get_cycles();
    2502 if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
    2503 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
    2504 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2505 #endif
    2506 }
    2507 
    2508 ///////////////////////////////////////////////
    2509 void rpc_mapper_handle_miss_server( xptr_t xp )
    2510 {
    2511 #if DEBUG_RPC_MAPPER_HANDLE_MISS
    2512 thread_t * this = CURRENT_THREAD;
    2513 uint32_t cycle = (uint32_t)hal_get_cycles();
    2514 if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
    2515 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
    2516 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2517 #endif
    2518 
    2519     mapper_t * mapper;
    2520     uint32_t   page_id;
    2521     xptr_t     page_xp;
    2522     error_t    error;
     2421#if DEBUG_RPC_VMM_RESIZE_VSEG
     2422cycle = (uint32_t)hal_get_cycles();
     2423if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
     2424printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2425__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2426#endif
     2427}
     2428
     2429////////////////////////////////////////////
     2430void rpc_vmm_resize_vseg_server( xptr_t xp )
     2431{
     2432#if DEBUG_RPC_VMM_RESIZE_VSEG
     2433thread_t * this = CURRENT_THREAD;
     2434uint32_t cycle = (uint32_t)hal_get_cycles();
     2435if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
     2436printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2437__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2438#endif
     2439
     2440    process_t * process;
     2441    vseg_t    * vseg;
     2442    intptr_t    new_base;
     2443    intptr_t    new_size;
    25232444
    25242445    // get client cluster identifier and pointer on RPC descriptor
     
    25272448
    25282449    // get arguments from client RPC descriptor
    2529     mapper  = (mapper_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
    2530     page_id =                       hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    2531 
    2532     // call local kernel function
    2533     error = mapper_handle_miss( mapper,
    2534                                 page_id,
    2535                                 &page_xp );
    2536 
    2537     // set output argument to client RPC descriptor
    2538     hal_remote_s64( XPTR( client_cxy , &desc->args[2] ) , (uint64_t)page_xp );
    2539     hal_remote_s64( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
    2540 
    2541 #if DEBUG_RPC_MAPPER_HANDLE_MISS
    2542 cycle = (uint32_t)hal_get_cycles();
    2543 if( cycle > DEBUG_RPC_MAPPER_HANDLE_MISS )
    2544 printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
    2545 __FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
    2546 #endif
    2547 }
    2548 */
    2549 
    2550 /////////////////////////////////////////////////////////////////////////////////////////
    2551 // [26]  Marshaling functions attached to RPC_VMM_DELETE_VSEG
    2552 /////////////////////////////////////////////////////////////////////////////////////////
    2553 
    2554 //////////////////////////////////////////////////
    2555 void rpc_vmm_delete_vseg_client( cxy_t        cxy,
    2556                                  pid_t        pid,
    2557                                  intptr_t     vaddr )
    2558 {
    2559 #if DEBUG_RPC_VMM_DELETE_VSEG
     2450    process  = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     2451    vseg     = (vseg_t    *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
     2452    new_base =              (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[2] ) );
     2453    new_size =              (intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[3] ) );
     2454
     2455    // call relevant kernel function
     2456    vmm_resize_vseg( process,
     2457                     vseg,
     2458                     new_base,
     2459                     new_size );
     2460
     2461#if DEBUG_RPC_VMM_RESIZE_VSEG
     2462cycle = (uint32_t)hal_get_cycles();
     2463if( cycle > DEBUG_RPC_VMM_RESIZE_VSEG )
     2464printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
     2465__FUNCTION__, this->process->pid, this->trdid, this->core->lid , cycle );
     2466#endif
     2467}
     2468
     2469
     2470/////////////////////////////////////////////////////////////////////////////////////////
     2471// [26]  Marshaling functions attached to RPC_VMM_REMOVE_VSEG
     2472/////////////////////////////////////////////////////////////////////////////////////////
     2473
     2474/////////////////////////////////////////////////
     2475void rpc_vmm_remove_vseg_client( cxy_t       cxy,
     2476                                 process_t * process,
     2477                                 vseg_t    * vseg )
     2478{
     2479#if DEBUG_RPC_VMM_REMOVE_VSEG
    25602480thread_t * this  = CURRENT_THREAD;
    25612481uint32_t   cycle = (uint32_t)hal_get_cycles();
    2562 if( cycle > DEBUG_RPC_VMM_DELETE_VSEG )
     2482if( cycle > DEBUG_RPC_VMM_REMOVE_VSEG )
    25632483printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
    25642484__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     
    25692489
    25702490    // initialise RPC descriptor header
    2571     rpc.index    = RPC_VMM_DELETE_VSEG;
     2491    rpc.index    = RPC_VMM_REMOVE_VSEG;
    25722492    rpc.blocking = true;
    25732493    rpc.rsp      = &responses;
    25742494
    25752495    // set input arguments in RPC descriptor
    2576     rpc.args[0] = (uint64_t)pid;
    2577     rpc.args[1] = (uint64_t)vaddr;
     2496    rpc.args[0] = (uint64_t)(intptr_t)process;
     2497    rpc.args[1] = (uint64_t)(intptr_t)vseg;
    25782498
    25792499    // register RPC request in remote RPC fifo
    25802500    rpc_send( cxy , &rpc );
    25812501
    2582 #if DEBUG_RPC_VMM_DELETE_VSEG
    2583 cycle = (uint32_t)hal_get_cycles();
    2584 if( cycle > DEBUG_RPC_VMM_DELETE_VSEG )
     2502#if DEBUG_RPC_VMM_REMOVE_VSEG
     2503cycle = (uint32_t)hal_get_cycles();
     2504if( cycle > DEBUG_RPC_VMM_REMOVE_VSEG )
    25852505printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
    25862506__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     
    25892509
    25902510////////////////////////////////////////////
    2591 void rpc_vmm_delete_vseg_server( xptr_t xp )
    2592 {
    2593 #if DEBUG_RPC_VMM_DELETE_VSEG
    2594 uint32_t cycle = (uint32_t)hal_get_cycles();
    2595 thread_t * this = CURRENT_THREAD;
    2596 if( DEBUG_RPC_VMM_DELETE_VSEG < cycle )
    2597 printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
    2598 __FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
    2599 #endif
     2511void rpc_vmm_remove_vseg_server( xptr_t xp )
     2512{
     2513#if DEBUG_RPC_VMM_REMOVE_VSEG
     2514uint32_t cycle = (uint32_t)hal_get_cycles();
     2515thread_t * this = CURRENT_THREAD;
     2516if( DEBUG_RPC_VMM_REMOVE_VSEG < cycle )
     2517printk("\n[%s] thread[%x,%x] on core %d enter / cycle %d\n",
     2518__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
     2519#endif
     2520
     2521    process_t * process;
     2522    vseg_t    * vseg;
    26002523
    26012524    // get client cluster identifier and pointer on RPC descriptor
     
    26042527
    26052528    // get arguments from RPC descriptor
    2606     pid_t    pid   = (pid_t)   hal_remote_l64( XPTR(client_cxy , &desc->args[0]) );
    2607     intptr_t vaddr = (intptr_t)hal_remote_l64( XPTR(client_cxy , &desc->args[1]) );
     2529    process  = (process_t *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[0] ) );
     2530    vseg     = (vseg_t    *)(intptr_t)hal_remote_l64( XPTR( client_cxy , &desc->args[1] ) );
    26082531
    26092532    // call relevant kernel function
    2610     vmm_delete_vseg( pid , vaddr );
    2611 
    2612 #if DEBUG_RPC_VMM_DELETE_VSEG
    2613 cycle = (uint32_t)hal_get_cycles();
    2614 if( DEBUG_RPC_VMM_DELETE_VSEG < cycle )
     2533    vmm_remove_vseg( process,
     2534                     vseg );
     2535
     2536#if DEBUG_RPC_VMM_REMOVE_VSEG
     2537cycle = (uint32_t)hal_get_cycles();
     2538if( DEBUG_RPC_VMM_REMOVE_VSEG < cycle )
    26152539printk("\n[%s] thread[%x,%x] on core %d exit / cycle %d\n",
    26162540__FUNCTION__, this->process->pid, this->trdid, this->core->lid, cycle );
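
    The new [25]/[26] marshaling functions keep the calling convention used by their callers elsewhere in this changeset (user_dir.c, vmm.c): <process> and <vseg> are local pointers in the server cluster, and the RPC is only taken when that cluster is remote. A minimal caller-side sketch, using a hypothetical helper name:

    // Caller-side pattern for the new RPC_VMM_REMOVE_VSEG interface (sketch only):
    // <process> and <vseg> are local pointers in cluster <cxy> / the helper name
    // remove_vseg_in_cluster() is hypothetical.
    static void remove_vseg_in_cluster( cxy_t       cxy,
                                        process_t * process,
                                        vseg_t    * vseg )
    {
        if( cxy == local_cxy )         // server cluster is the local cluster
        {
            vmm_remove_vseg( process , vseg );                   // direct call
        }
        else                           // server cluster is remote
        {
            rpc_vmm_remove_vseg_client( cxy , process , vseg );  // blocking RPC
        }
    }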
  • trunk/kernel/kern/rpc.h

    r635 r640  
    6060typedef enum
    6161{
    62     RPC_UNDEFINED_0               = 0,   // RPC_PMEM_GET_PAGES       deprecated [AG]
    63     RPC_UNDEFINED_1               = 1,   // RPC_PMEM_RELEASE_PAGES   deprecated [AG]
    64     RPC_UNDEFINED_2               = 2,   // RPC_PMEM_DISPLAY         deprecated [AG]     
     62    RPC_UNDEFINED_0               = 0,   //
     63    RPC_UNDEFINED_1               = 1,   //
     64    RPC_UNDEFINED_2               = 2,   //      
    6565    RPC_PROCESS_MAKE_FORK         = 3,
    6666    RPC_USER_DIR_CREATE           = 4,
     
    8282    RPC_VFS_INODE_LOAD_ALL_PAGES  = 19,
    8383
    84     RPC_VMM_GET_VSEG              = 20,
    85     RPC_VMM_GLOBAL_UPDATE_PTE     = 21,
    86     RPC_UNDEFINED_22              = 22,   // RPC_KCM_ALLOC           deprecated [AG]
    87     RPC_UNDEFINED_23              = 23,   // RPC_KCM_FREE            deprecated [AG]
     84    RPC_UNDEFINED_20              = 20,   //
     85    RPC_UNDEFINED_21              = 21,   //
     86    RPC_UNDEFINED_22              = 22,   //
     87    RPC_UNDEFINED_23              = 23,   //
    8888    RPC_MAPPER_SYNC               = 24,
    89     RPC_UNDEFUNED_25              = 25,   // RPC_MAPPER_HANDLE_MISS  deprecated [AG]
    90     RPC_VMM_DELETE_VSEG           = 26,
     89    RPC_VMM_RESIZE_VSEG           = 25,
     90    RPC_VMM_REMOVE_VSEG           = 26,
    9191    RPC_VMM_CREATE_VSEG           = 27,
    9292    RPC_VMM_SET_COW               = 28,
    93     RPC_UNDEFINED_29              = 29,   // RPC_VMM_DISPLAY         deprecated [AG]
     93    RPC_UNDEFINED_29              = 29,   //
    9494
    9595    RPC_MAX_INDEX                 = 30,
     
    175175
    176176/***********************************************************************************
    177  * [0] The RPC_PMEM_GET_PAGES allocates one or several pages in a remote cluster,
    178  * and returns the local pointer on the page descriptor.
    179  *         deprecated [AG] may 2019
    180  ***********************************************************************************
    181  * @ cxy     : server cluster identifier
    182  * @ order   : [in]  ln2( number of requested pages )
    183  * @ page    : [out] local pointer on page descriptor / NULL if failure
    184  **********************************************************************************/
    185 
    186 /*
    187 void rpc_pmem_get_pages_client( cxy_t             cxy,
    188                                 uint32_t          order,
    189                                 struct page_s  ** page );
    190 
    191 void rpc_pmem_get_pages_server( xptr_t xp );
    192 */
    193 
    194 /***********************************************************************************
    195  * [1] The RPC_PMEM_RELEASE_PAGES release one or several pages to a remote cluster.
    196  *         deprecated [AG] may 2019
    197  ***********************************************************************************
    198  * @ cxy     : server cluster identifier
    199  * @ page    : [in] local pointer on page descriptor to release.
    200  **********************************************************************************/
    201 
    202 /*
    203 void rpc_pmem_release_pages_client( cxy_t            cxy,
    204                                     struct page_s  * page );
    205 
    206 void rpc_pmem_release_pages_server( xptr_t xp );
    207 */
    208 
    209 /***********************************************************************************
    210  * [2] The RPC_PPM_DISPLAY allows any client thread to require any remote cluster
    211  * identified by the <cxy> argumentto display the physical memory allocator state.
    212  *         deprecated [AG] may 2019
    213  **********************************************************************************/
    214 
    215 /*
    216 void rpc_ppm_display_client( cxy_t  cxy );
    217 
    218 void rpc_ppm_display_server( xptr_t xp );
    219 */
     177 * [0] undefined
     178 **********************************************************************************/
     179
     180/***********************************************************************************
     181 * [1] undefined
     182 **********************************************************************************/
     183
     184/***********************************************************************************
     185 * [2] undefined
     186 **********************************************************************************/
    220187
    221188/***********************************************************************************
     
    523490
    524491/***********************************************************************************
    525  * [20] The RPC_VMM_GET_VSEG returns an extended pointer
    526  * on the vseg containing a given virtual address in a given process.
    527  * The server cluster is supposed to be the reference cluster.
    528  * It returns a non zero error value if no vseg has been founded.
    529  ***********************************************************************************
    530  * @ cxy     : server cluster identifier.
    531  * @ process : [in]   pointer on process descriptor in server cluster.
    532  * @ vaddr   : [in]   virtual address to be searched.
    533  * @ vseg_xp : [out]  buffer for extended pointer on vseg in client cluster.
    534  * @ error   : [out] local pointer on buffer for error code (in client cluster).
    535  **********************************************************************************/
    536 void rpc_vmm_get_vseg_client( cxy_t              cxy,
    537                               struct process_s * process,
    538                               intptr_t           vaddr,
    539                               xptr_t           * vseg_xp,
    540                               error_t          * error );
    541 
    542 void rpc_vmm_get_vseg_server( xptr_t xp );
    543 
    544 /***********************************************************************************
    545  * [21] The RPC_VMM_GLOBAL_UPDATE_PTE can be used by a thread that is not running
    546  * in reference cluster, to ask the reference cluster to update a specific entry,
    547  * identified by the <vpn> argument in all GPT copies of a process identified by
    548  * the <process> argument, using the values defined by <attr> and <ppn> arguments.
    549  * The server cluster is supposed to be the reference cluster.
    550  * It does not return any error code as the called function vmm_global_update_pte()
    551  * cannot fail.
    552  ***********************************************************************************
    553  * @ cxy     : server cluster identifier.
    554  * @ process : [in]  pointer on process descriptor in server cluster.
    555  * @ vpn     : [in]  virtual address to be searched.
    556  * @ attr    : [in]  PTE attributes.
    557  * @ ppn     : [it]  PTE PPN.
    558  **********************************************************************************/
    559 void rpc_vmm_global_update_pte_client( cxy_t              cxy,
    560                                        struct process_s * process,
    561                                        vpn_t              vpn,
    562                                        uint32_t           attr,
    563                                        ppn_t              ppn );
    564 
    565 void rpc_vmm_global_update_pte_server( xptr_t xp );
    566 
    567 /***********************************************************************************
    568  * [22] The RPC_KCM_ALLOC allocates memory from a given KCM in a remote cluster,
    569  * and returns an extended pointer on the allocated object.
    570   It returns XPTR_NULL if physical memory cannot be allocated.
    571  ***********************************************************************************
    572  * @ cxy       : server cluster identifier.
    573  * @ kmem_type : [in]  KCM object type (as defined in kmem.h).
    574  * @ buf_xp    : [out] buffer for extended pointer on allocated buffer.
    575  **********************************************************************************/
    576 
    577 /*
    578 void rpc_kcm_alloc_client( cxy_t      cxy,
    579                            uint32_t   kmem_type,
    580                            xptr_t   * buf_xp ); 
    581 
    582 void rpc_kcm_alloc_server( xptr_t xp );
    583 */
    584 
    585 /***********************************************************************************
    586  * [23] The RPC_KCM_FREE releases memory allocated for a KCM object of a given type,
    587  * in a remote cluster.
    588  ***********************************************************************************
    589  * @ cxy       : server cluster identifier.
    590  * @ buf       : [in] local pointer on allocated buffer.
    591  * @ kmem_type : [in]  KCM object type (as defined in kmem.h).
    592  **********************************************************************************/
    593 
    594 /*
    595 void rpc_kcm_free_client( cxy_t     cxy,
    596                           void    * buf,
    597                           uint32_t  kmem_type );
    598 
    599 void rpc_kcm_free_server( xptr_t xp );
    600 */
     492 * [20] undefined
     493 **********************************************************************************/
     494
     495/***********************************************************************************
     496 * [21] undefined
     497 **********************************************************************************/
     498
     499/***********************************************************************************
     500 * [22] undefined
     501 **********************************************************************************/
     502
     503/***********************************************************************************
     504 * [23] undefined
     505 **********************************************************************************/
    601506
    602507/***********************************************************************************
     
    615520
    616521/***********************************************************************************
    617  * [25] The RPC__MAPPER_HANDLE_MISS allows a client thread to request a remote
    618  * mapper to load a missing page from the IOC device.
    619  * On the server side, this RPC call the mapper_handle_miss() function and return
    620  * an extended pointer on the allocated page descriptor and an error status.
     522 * [25] The RPC_VMM_RESIZE_VSEG allows a client thread to request a remote vseg
     523 * resize. Both the VSL and the GPT are updated in the remote cluster.
    621524 ***********************************************************************************
    622525 * @ cxy         : server cluster identifier.
    623  * @ mapper      : [in]  local pointer on mapper.
    624  * @ page_id     : [in]  missing page index in mapper
    625  * @ buffer      : [in]  user space pointer / kernel extended pointer
    626  * @ page_xp     : [out] pointer on buffer for extended pointer on page descriptor.
    627  * @ error       : [out] error status (0 if success).
    628  **********************************************************************************/
    629 /*
    630 void rpc_mapper_handle_miss_client( cxy_t             cxy,
    631                                     struct mapper_s * mapper,
    632                                     uint32_t          page_id,
    633                                     xptr_t          * page_xp,
    634                                     error_t         * error );
     526 * @ process     : [in] local pointer on remote process.
     527 * @ vseg        : [in] local pointer on remote vseg.
     528 * @ new_base    : [in] new vseg base address.
     529 * @ new_size    : [in] new vseg size.
     530 **********************************************************************************/
     531void rpc_vmm_resize_vseg_client( cxy_t              cxy,
     532                                 struct process_s * process,
     533                                 struct vseg_s    * vseg,
     534                                 intptr_t           new_base,
     535                                 intptr_t           new_size );
    635536 
    636 void rpc_mapper_handle_miss_server( xptr_t xp );
    637 */
    638 /***********************************************************************************
    639  * [26] The RPC_VMM_DELETE_VSEG allows any client thread  to request a remote
    640  * cluster to delete from a given VMM, identified by the <pid> argument
    641  * a given vseg, identified by the <vaddr> argument.
     537void rpc_vmm_resize_vseg_server( xptr_t xp );
     538
     539/***********************************************************************************
      540 * [26] The RPC_VMM_REMOVE_VSEG allows a client thread to request a remote vseg
      541 * delete. Both the VSL and the GPT are updated in the remote cluster.
    642542 ***********************************************************************************
    643543 * @ cxy         : server cluster identifier.
    644  * @ pid         : [in] target process identifier.
    645  * @ vaddr       : [in] vseg base address.
    646  **********************************************************************************/
    647 void rpc_vmm_delete_vseg_client( cxy_t       cxy,
    648                                  pid_t       pid,
    649                                  intptr_t    vaddr );
     544 * @ process     : [in] local pointer on remote process.
     545 * @ vseg        : [in] local pointer on remote vseg.
     546 **********************************************************************************/
     547void rpc_vmm_remove_vseg_client( cxy_t              cxy,
     548                                 struct process_s * process,
     549                                 struct vseg_s    * vseg );
    650550 
    651 void rpc_vmm_delete_vseg_server( xptr_t xp );
     551void rpc_vmm_remove_vseg_server( xptr_t xp );
    652552
    653553/***********************************************************************************
     
    698598
    699599/***********************************************************************************
    700  * [29] The RPC_VMM_DISPLAY allows any client thread to display the VMM state
    701  * of a remote reference process identified by the <cxy> and <process> arguments.
    702  * The type of display is defined by the <detailed> boolean argument.
    703  ***********************************************************************************
    704  * @ cxy         : server cluster identifier.
    705  * @ process     : [in]  local pointer on reference process descriptor.
    706  * @ detailed    : [in]  detailed display if true.
    707  **********************************************************************************/
    708 
    709 /*
    710 void rpc_hal_vmm_display_client( cxy_t              cxy,
    711                              struct process_s * process,
    712                              bool_t             detailed );
    713 
    714 void rpc_hal_vmm_display_server( xptr_t xp );
    715 */
     600 * [29] undefined
     601 **********************************************************************************/
     602
    716603
    717604#endif
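
    For context, the rpc_index_t values above must stay aligned with the two per-index tables edited in rpc.c (the array of server functions and the array of name strings), because the server side dispatches on the index stored in the RPC descriptor. A sketch of that dispatch step, assuming the function-pointer array shown at the top of rpc.c is named rpc_server[]:

    // Server-side dispatch implied by the rpc.h indexes and the rpc.c tables
    // (sketch only / the identifier rpc_server[] is an assumption).
    cxy_t        client_cxy = GET_CXY( desc_xp );
    rpc_desc_t * desc       = GET_PTR( desc_xp );
    uint32_t     index      = hal_remote_l32( XPTR( client_cxy , &desc->index ) );

    if( index < RPC_MAX_INDEX ) rpc_server[index]( desc_xp );   // index 26 selects
                                                                // rpc_vmm_remove_vseg_server()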
  • trunk/kernel/kern/scheduler.c

    r635 r640  
    507507#if (DEBUG_SCHED_YIELD & 0x1)
    508508if( sched->trace || (cycle > DEBUG_SCHED_YIELD) )
    509 sched_display( lid );
     509sched_remote_display( local_cxy , lid );
    510510#endif
    511511
     
    593593}  // end sched_yield()
    594594
    595 
    596 ///////////////////////////////
    597 void sched_display( lid_t lid )
    598 {
    599     list_entry_t * iter;
    600     thread_t     * thread;
    601 
    602     core_t       * core    = &LOCAL_CLUSTER->core_tbl[lid];
    603     scheduler_t  * sched   = &core->scheduler;
    604    
    605     // get pointers on TXT0 chdev
    606     xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
    607     cxy_t     txt0_cxy = GET_CXY( txt0_xp );
    608     chdev_t * txt0_ptr = GET_PTR( txt0_xp );
    609 
    610     // get extended pointer on remote TXT0 lock
    611     xptr_t  lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
    612 
    613     // get TXT0 lock
    614     remote_busylock_acquire( lock_xp );
    615 
    616     nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    617     local_cxy , lid, sched->current, LOCAL_CLUSTER->rpc_threads[lid],
    618     (uint32_t)hal_get_cycles() );
    619 
    620     // display kernel threads
    621     LIST_FOREACH( &sched->k_root , iter )
    622     {
    623         thread = LIST_ELEMENT( iter , thread_t , sched_list );
    624         if (thread->type == THREAD_DEV)
    625         {
    626             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
    627             thread_type_str( thread->type ), thread->process->pid, thread->trdid,
    628             thread, thread->blocked, thread->flags, thread->chdev->name );
    629         }
    630         else
    631         {
    632             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
    633             thread_type_str( thread->type ), thread->process->pid, thread->trdid,
    634             thread, thread->blocked, thread->flags );
    635         }
    636     }
    637 
    638     // display user threads
    639     LIST_FOREACH( &sched->u_root , iter )
    640     {
    641         thread = LIST_ELEMENT( iter , thread_t , sched_list );
    642         nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
    643         thread_type_str( thread->type ), thread->process->pid, thread->trdid,
    644         thread, thread->blocked, thread->flags );
    645     }
    646 
    647     // release TXT0 lock
    648     remote_busylock_release( lock_xp );
    649 
    650 }  // end sched_display()
    651595
    652596/////////////////////////////////////
     
    684628    nolock_printk("\n***** threads on core[%x,%d] / current %x / rpc_threads %d / cycle %d\n",
    685629    cxy , lid, current, rpcs, (uint32_t)hal_get_cycles() );
     630    nolock_printk("  type | pid        | trdid      | desc       | block      | flags      | func\n");
    686631
    687632    // display kernel threads
     
    706651            hal_remote_strcpy( XPTR( local_cxy , name ), XPTR( cxy , chdev->name ) );
    707652
    708             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X / %s\n",
     653            nolock_printk(" - %s | %X | %X | %X | %X | %X | %s\n",
    709654            thread_type_str( type ), pid, trdid, thread, blocked, flags, name );
    710655        }
    711656        else
    712657        {
    713             nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
     658            nolock_printk(" - %s | %X | %X | %X | %X | %X |\n",
    714659            thread_type_str( type ), pid, trdid, thread, blocked, flags );
    715660        }
     
    732677        process_t *   process = hal_remote_lpt ( XPTR( cxy , &thread->process ) );
    733678        pid_t         pid     = hal_remote_l32 ( XPTR( cxy , &process->pid ) );
    734 
    735         nolock_printk(" - %s / pid %X / trdid %X / desc %X / block %X / flags %X\n",
    736         thread_type_str( type ), pid, trdid, thread, blocked, flags );
     679        void      *   func    = hal_remote_lpt ( XPTR( cxy , &thread->entry_func ) );
     680
     681        nolock_printk(" - %s | %X | %X | %X | %X | %X | %x\n",
     682        thread_type_str( type ), pid, trdid, thread, blocked, flags, (uint32_t)func );
    737683
    738684        // get next user thread list_entry
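
    With sched_display() removed, the local use cases in this changeset (scheduler.c, thread.c) go through the remote variant with local_cxy as first argument. An illustrative debug dump of every scheduler in the local cluster, assuming the cluster descriptor exposes its core count as cores_nr:

    // Display all local schedulers (sketch only / cores_nr is an assumed field name).
    lid_t lid;
    for( lid = 0 ; lid < LOCAL_CLUSTER->cores_nr ; lid++ )
    {
        sched_remote_display( local_cxy , lid );
    }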
  • trunk/kernel/kern/scheduler.h

    r637 r640  
    8989
    9090/*********************************************************************************************
    91  * This debug function displays on TXT0 the internal state of a local scheduler,
    92  * identified by the core local index <lid>. It must be called by a local thread.
    93  *********************************************************************************************
    94  * @ lid      : local index of target core.
    95  ********************************************************************************************/
    96 void sched_display( lid_t lid );
    97 
    98 /*********************************************************************************************
    9991 * This debug function displays on TXT0 the internal state of a scheduler,
    10092 * identified by the target cluster identifier <cxy> and the core local index <lid>.
  • trunk/kernel/kern/thread.c

    r637 r640  
    890890    core_t        * core    = thread->core;
    891891
    892 #if DEBUG_THREAD_DESTROY
    893 uint32_t   cycle = (uint32_t)hal_get_cycles();
     892
     893#if DEBUG_THREAD_DESTROY || CONFIG_INSTRUMENTATION_PGFAULTS
     894uint32_t   cycle;
    894895thread_t * this  = CURRENT_THREAD;
     896#endif
     897
     898#if (DEBUG_THREAD_DESTROY & 1)
     899cycle = (uint32_t)hal_get_cycles();
    895900if( DEBUG_THREAD_DESTROY < cycle )
    896901printk("\n[%s] thread[%x,%x] enter to destroy thread[%x,%x] / cycle %d\n",
     
    902907
    903908#if CONFIG_INSTRUMENTATION_PGFAULTS
    904         process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
    905         process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
    906         process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
    907         process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
    908         process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
    909         process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
     909process->vmm.false_pgfault_nr    += thread->info.false_pgfault_nr;
     910process->vmm.local_pgfault_nr    += thread->info.local_pgfault_nr;
     911process->vmm.global_pgfault_nr   += thread->info.global_pgfault_nr;
     912process->vmm.false_pgfault_cost  += thread->info.false_pgfault_cost;
     913process->vmm.local_pgfault_cost  += thread->info.local_pgfault_cost;
     914process->vmm.global_pgfault_cost += thread->info.global_pgfault_cost;
     915#endif
     916
     917#if (CONFIG_INSTRUMENTATION_PGFAULTS & 1)
     918uint32_t false_nr    = thread->info.false_pgfault_nr;
     919uint32_t local_nr    = thread->info.local_pgfault_nr;
     920uint32_t global_nr   = thread->info.global_pgfault_nr;
     921uint32_t false_cost  = thread->info.false_pgfault_cost;
     922uint32_t local_cost  = thread->info.local_pgfault_cost;
     923uint32_t global_cost = thread->info.global_pgfault_cost;
     924printk("***** thread[%x,%x] page-faults\n"
     925       " - false    %d ( %d cycles )\n"
     926       " - local    %d ( %d cycles )\n"
     927       " - global   %d ( %d cycles )\n",
     928       this->process->pid, this->trdid,
     929       false_nr , false_cost / false_nr,
     930       local_nr , local_cost / local_nr,
     931       global_nr, global_cost / global_nr );
    910932#endif
    911933
     
    12701292cycle = (uint32_t)hal_get_cycles();
    12711293if( DEBUG_THREAD_IDLE < cycle )
    1272 sched_display( CURRENT_THREAD->core->lid );
     1294sched_remote_display( local_cxy , CURRENT_THREAD->core->lid );
    12731295#endif     
    12741296        // search a runable thread
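
    The per-type averages printed above divide each accumulated cost by the corresponding fault count. A defensive variant (illustrative only, not part of the changeset) that tolerates a thread which never took a fault of a given type:

    // Guarded averages: a zero fault count would otherwise divide by zero.
    uint32_t false_avg  = false_nr  ? (false_cost  / false_nr ) : 0;
    uint32_t local_avg  = local_nr  ? (local_cost  / local_nr ) : 0;
    uint32_t global_avg = global_nr ? (global_cost / global_nr) : 0;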
  • trunk/kernel/kernel_config.h

    r637 r640  
    9696#define DEBUG_HAL_GPT_CREATE              0
    9797#define DEBUG_HAL_GPT_DESTROY             0
    98 #define DEBUG_HAL_GPT_LOCK_PTE            0
     98#define DEBUG_HAL_GPT_LOCK_PTE               2
    9999#define DEBUG_HAL_GPT_SET_COW             0
    100100#define DEBUG_HAL_GPT_SET_PTE             0
     
    259259#define DEBUG_VMM_GET_ONE_PPN             0
    260260#define DEBUG_VMM_GET_PTE                 0
     261#define DEBUG_VMM_GLOBAL_DELETE_VSEG      0
     262#define DEBUG_VMM_GLOBAL_RESIZE_VSEG      0
    261263#define DEBUG_VMM_HANDLE_PAGE_FAULT       0
    262264#define DEBUG_VMM_HANDLE_COW              0
     
    309311#define LOCK_PROCESS_FDARRAY  27   // remote (Q)  protect array of open files in owner process
    310312#define LOCK_PROCESS_DIR      28   // remote (Q)  protect xlist of open directories in process
     313#define LOCK_VMM_VSL          29   // remote (Q)  protect VSL (local list of vsegs)
    311314
    312315#define LOCK_PROCESS_THTBL    30   // local  (RW) protect local array of threads in a process
     
    315318#define LOCK_VFS_SIZE         32   // remote (RW) protect inode state and associated mapper
    316319#define LOCK_VFS_FILE         33   // remote (RW) protect file descriptor state
    317 #define LOCK_VMM_VSL          34   // remote (RW) protect VSL (local list of vsegs)
    318 #define LOCK_VFS_MAIN         35   // remote (RW) protect vfs traversal (in root inode)
    319 #define LOCK_FATFS_FAT        36   // remote (RW) protect exclusive access to the FATFS FAT
     320#define LOCK_VFS_MAIN         34   // remote (RW) protect vfs traversal (in root inode)
     321#define LOCK_FATFS_FAT        35   // remote (RW) protect exclusive access to the FATFS FAT
    320322
    321323////////////////////////////////////////////////////////////////////////////////////////////
     
    462464#define CONFIG_INSTRUMENTATION_PGFAULTS    0
    463465#define CONFIG_INSTRUMENTATION_FOOTPRINT   0
     466#define CONFIG_INSTRUMENTATION_GPT         1
    464467
    465468
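
    The LOCK_* renumbering above mirrors the lock-name table edited in kernel_init.c: both are indexed by the same value. A sketch of a sanity check, using plain strcmp()/printk() and assuming the kernel_init.c table is named lock_type_str[]:

    // Each LOCK_* define must select the matching entry of the lock-name table
    // (sketch only / lock_type_str[] is an assumed identifier).
    if( strcmp( lock_type_str[LOCK_VMM_VSL]   , "VMM_VSL"   ) ||
        strcmp( lock_type_str[LOCK_VFS_MAIN]  , "VFS_MAIN"  ) ||
        strcmp( lock_type_str[LOCK_FATFS_FAT] , "FATFS_FAT" ) )
    {
        printk("\n[WARNING] lock name table out of sync with LOCK_* defines\n");
    }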
  • trunk/kernel/libk/remote_barrier.h

    r623 r640  
    5656 *    If the (x_size, y_size, nthreads) arguments are defined in the barrier attributes,
    5757 *    the barrier is implemented as a hierarchical quad-tree covering all clusters in the
    58  *    (x_size * ysize) mesh, including cluster (0,0), with nthreads per cluster, and called
    59  *    DQT : Distributed Quad Tree. This DQT implementation supposes a regular architecture,
    60                      uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity       ));
     58 *    (x_size * ysize) mesh, including cluster (0,0), with nthreads per cluster.
     59 *    This DQT (Distributed Quad Tree) implementation assumes a regular architecture,
    6160 *    and a strong contraint on the threads placement: exactly "nthreads" threads per
    6261 *    cluster in the (x_size * y_size) mesh.
     
    7776 * It is implemented in the reference process cluster, and contains
    7877 * - the barrier identifier,
    79  * - the implementation type (simple or QDT),
     78 * - the implementation type (simple or dqt),
    8079 * - an xlist implementing the set of barriers dynamically created by a given process,
    8180 * - a pointer on the implementation specific descriptor (simple_barrier / sqt_barrier).
  • trunk/kernel/libk/user_dir.c

    r635 r640  
    294294
    295295            // delete the vseg
    296             if( ref_cxy == local_cxy)
    297                 vmm_delete_vseg( ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT );
    298             else
    299                 rpc_vmm_delete_vseg_client( ref_cxy, ref_pid, vpn_base << CONFIG_PPM_PAGE_SHIFT );
    300 
     296            if( ref_cxy == local_cxy)  vmm_remove_vseg( ref_ptr, vseg );
     297            else                       rpc_vmm_remove_vseg_client( ref_cxy, ref_ptr, vseg );
     298         
    301299            // release the user_dir descriptor
    302300            req.type = KMEM_KCM;
     
    459457    rpc.rsp       = &responses;
    460458    rpc.blocking  = false;
    461     rpc.index     = RPC_VMM_DELETE_VSEG;
     459    rpc.index     = RPC_VMM_REMOVE_VSEG;
    462460    rpc.thread    = this;
    463461    rpc.lid       = this->core->lid;
  • trunk/kernel/mm/vmm.c

    r635 r640  
    3232#include <printk.h>
    3333#include <memcpy.h>
    34 #include <remote_rwlock.h>
    3534#include <remote_queuelock.h>
    3635#include <list.h>
     
    313312
    314313    // initialize the lock protecting the VSL
    315         remote_rwlock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
     314        remote_queuelock_init( XPTR( local_cxy , &vmm->vsl_lock ) , LOCK_VMM_VSL );
    316315
    317316
     
    425424
    426425    // take the VSL lock
    427         remote_rwlock_wr_acquire( lock_xp );
     426        remote_queuelock_acquire( lock_xp );
    428427
    429428    // scan the VSL to delete all non kernel vsegs
     
    474473
    475474    // release the VSL lock
    476         remote_rwlock_wr_release( lock_xp );
     475        remote_queuelock_release( lock_xp );
    477476
    478477// FIXME il faut gérer les process copies...
     
    491490
    492491}  // end vmm_user_reset()
     492
     493/////////////////////////////////////////////////
     494void vmm_global_delete_vseg( process_t * process,
     495                             intptr_t    base )
     496{
     497    pid_t           pid;
     498    cxy_t           owner_cxy;
     499    lpid_t          owner_lpid;
     500
     501    xlist_entry_t * process_root_ptr;
     502    xptr_t          process_root_xp;
     503    xptr_t          process_iter_xp;
     504
     505    xptr_t          remote_process_xp;
     506    cxy_t           remote_process_cxy;
     507    process_t     * remote_process_ptr;
     508
     509    xptr_t          vsl_root_xp;
     510    xptr_t          vsl_lock_xp;
     511    xptr_t          vsl_iter_xp;
     512
     513#if DEBUG_VMM_GLOBAL_DELETE_VSEG
     514uint32_t cycle = (uint32_t)hal_get_cycles();
     515thread_t * this = CURRENT_THREAD;
     516#endif
     517
     518#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
     519if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     520printk("\n[%s] thread[%x,%x] : process %x / base %x / cycle %d\n",
     521__FUNCTION__, this->process->pid, this->trdid, process->pid, base, cycle );
     522#endif
     523
     524    // get owner process cluster and local index
     525    pid              = process->pid;
     526    owner_cxy        = CXY_FROM_PID( pid );
     527    owner_lpid       = LPID_FROM_PID( pid );
     528
     529    // get extended pointer on root of process copies xlist in owner cluster
     530    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
     531    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
     532
     533    // loop on process copies
     534    XLIST_FOREACH( process_root_xp , process_iter_xp )
     535    {
     536        // get cluster and local pointer on remote process
     537        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
     538        remote_process_ptr = GET_PTR( remote_process_xp );
     539        remote_process_cxy = GET_CXY( remote_process_xp );
     540
     541        // build extended pointers on remote VSL root and lock
     542        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
     543        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );
     544
     545        // get lock on remote VSL
     546        remote_queuelock_acquire( vsl_lock_xp );
     547
     548        // loop on vsegs in remote process VSL
     549        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
     550        {
     551            // get pointers on current vseg
     552            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
     553            vseg_t * vseg_ptr = GET_PTR( vseg_xp );
     554
     555            // get current vseg base address
     556            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
     557                                                                 &vseg_ptr->min ) );
     558
     559            if( vseg_base == base )   // found searched vseg
     560            {
     561                if( remote_process_cxy == local_cxy )
     562                {
     563                    vmm_remove_vseg( process,
     564                                     vseg_ptr );
     565                }
     566                else
     567                {
     568                    rpc_vmm_remove_vseg_client( remote_process_cxy,
     569                                                remote_process_ptr,
     570                                                vseg_ptr );
     571                }
     572
     573#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
     574if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     575printk("\n[%s] thread[%x,%x] deleted vseg %x for process %x in cluster %x\n",
     576__FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy );
     577#endif
     578
     579            }
     580        }  // end of loop on vsegs
     581
     582#if (DEBUG_VMM_GLOBAL_DELETE_VSEG & 1)
     583if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     584hal_vmm_display( remote_process_xp , false );
     585#endif
     586
     587        // release lock on remote VSL
     588        remote_queuelock_release( vsl_lock_xp );
     589
     590    }  // end of loop on process copies
     591
     592#if DEBUG_VMM_GLOBAL_DELETE_VSEG
     593cycle = (uint32_t)hal_get_cycles();
     594if( DEBUG_VMM_GLOBAL_DELETE_VSEG < cycle )
     595printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
     596__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
     597#endif
     598
     599}  // end vmm_global_delete_vseg()
     600
     601////////////////////////////////////////////////
     602void vmm_global_resize_vseg( process_t * process,
     603                             intptr_t    base,
     604                             intptr_t    new_base,
     605                             intptr_t    new_size )
     606{
     607    pid_t           pid;
     608    cxy_t           owner_cxy;
     609    lpid_t          owner_lpid;
     610
     611    xlist_entry_t * process_root_ptr;
     612    xptr_t          process_root_xp;
     613    xptr_t          process_iter_xp;
     614
     615    xptr_t          remote_process_xp;
     616    cxy_t           remote_process_cxy;
     617    process_t     * remote_process_ptr;
     618
     619    xptr_t          vsl_root_xp;
     620    xptr_t          vsl_lock_xp;
     621    xptr_t          vsl_iter_xp;
     622
     623#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
     624uint32_t cycle = (uint32_t)hal_get_cycles();
     625thread_t * this = CURRENT_THREAD;
     626#endif
     627
     628#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
     629if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     630printk("\n[%s] thread[%x,%x] : process %x / base %x / new_base %x / new_size %x / cycle %d\n",
     631__FUNCTION__, this->process->pid, this->trdid, process->pid, base, new_base, new_size, cycle );
     632#endif
     633
     634    // get owner process cluster and local index
     635    pid              = process->pid;
     636    owner_cxy        = CXY_FROM_PID( pid );
     637    owner_lpid       = LPID_FROM_PID( pid );
     638
     639    // get extended pointer on root of process copies xlist in owner cluster
     640    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
     641    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
     642
     643    // loop on process copies
     644    XLIST_FOREACH( process_root_xp , process_iter_xp )
     645    {
     646        // get cluster and local pointer on remote process
     647        remote_process_xp  = XLIST_ELEMENT( process_iter_xp , process_t , copies_list );
     648        remote_process_ptr = GET_PTR( remote_process_xp );
     649        remote_process_cxy = GET_CXY( remote_process_xp );
     650
     651        // build extended pointers on remote VSL root and lock
     652        vsl_root_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsegs_root );
     653        vsl_lock_xp = XPTR( remote_process_cxy , &remote_process_ptr->vmm.vsl_lock );
     654
     655        // get lock on remote VSL
     656        remote_queuelock_acquire( vsl_lock_xp );
     657
     658        // loop on vsegs in remote process VSL
     659        XLIST_FOREACH( vsl_root_xp , vsl_iter_xp )
     660        {
     661            // get pointers on current vseg
     662            xptr_t   vseg_xp  = XLIST_ELEMENT( vsl_iter_xp , vseg_t , xlist );
     663            vseg_t * vseg_ptr = GET_PTR( vseg_xp );
     664
     665            // get current vseg base address
     666            intptr_t vseg_base = (intptr_t)hal_remote_lpt( XPTR( remote_process_cxy,
     667                                                                 &vseg_ptr->min ) );
     668
     669            if( vseg_base == base )   // found searched vseg
     670            {
     671                if( remote_process_cxy == local_cxy )
     672                {
     673                    vmm_resize_vseg( remote_process_ptr,
     674                                     vseg_ptr,
     675                                     new_base,
     676                                     new_size );
     677                }
     678                else
     679                {
     680                    rpc_vmm_resize_vseg_client( remote_process_cxy,
     681                                                remote_process_ptr,
     682                                                vseg_ptr,
     683                                                new_base,
     684                                                new_size );
     685                }
     686 
     687#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
     688if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     689printk("\n[%s] thread[%x,%x] resized vseg %x for process %x in cluster %x\n",
     690__FUNCTION__, this->process->pid, this->trdid, base, process->pid, remote_process_cxy );
     691#endif
     692
     693            }
     694        }  // end of loop on vsegs
     695
     696#if (DEBUG_VMM_GLOBAL_RESIZE_VSEG & 1)
     697if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     698hal_vmm_display( remote_process_xp , false );
     699#endif
     700
     701        // release lock on remote VSL
     702        remote_queuelock_release( vsl_lock_xp );
     703    }  // end of loop on process copies
     704
     705#if DEBUG_VMM_GLOBAL_RESIZE_VSEG
     706cycle = (uint32_t)hal_get_cycles();
     707if( DEBUG_VMM_GLOBAL_RESIZE_VSEG < cycle )
     708printk("\n[%s] thread[%x,%x] exit for process %x / base %x / cycle %d\n",
     709__FUNCTION__, this->process->pid, this->trdid, process->pid , base, cycle );
     710#endif
     711
     712}  // end vmm_global_resize_vseg()
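
With this split, the choice between deleting and resizing a vseg on munmap() belongs to the
caller rather than to the old single-cluster vmm_resize_vseg(). A minimal sketch of how a
caller such as sys_munmap() might dispatch between vmm_global_delete_vseg() and
vmm_global_resize_vseg() is given below; the helper name, the <addr_min>/<addr_max>
parameters, and the handling of the "hole in the middle" case are assumptions, not part of
this changeset.

    // hedged sketch : possible dispatch in sys_munmap(), assuming <vseg> covers [min,max[
    // and [addr_min,addr_max[ is the region to unmap (both names are hypothetical here)
    static void munmap_dispatch_sketch( process_t * process,
                                        vseg_t    * vseg,
                                        intptr_t    addr_min,
                                        intptr_t    addr_max )
    {
        if( (addr_min == vseg->min) && (addr_max == vseg->max) )
        {
            // whole vseg unmapped => delete it in all VSL copies
            vmm_global_delete_vseg( process , vseg->min );
        }
        else if( addr_min == vseg->min )
        {
            // head of vseg unmapped => keep the tail [addr_max,max[
            vmm_global_resize_vseg( process , vseg->min , addr_max , vseg->max - addr_max );
        }
        else if( addr_max == vseg->max )
        {
            // tail of vseg unmapped => keep the head [min,addr_min[
            vmm_global_resize_vseg( process , vseg->min , vseg->min , addr_min - vseg->min );
        }
        // the "hole in the middle" case would also require a new vseg created with
        // vmm_create_vseg() ; it is not handled in this sketch
    }
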
    493713
    494714////////////////////////////////////////////////
     
    498718                            ppn_t       ppn )
    499719{
     720    pid_t           pid;
     721    cxy_t           owner_cxy;
     722    lpid_t          owner_lpid;
     723
    500724    xlist_entry_t * process_root_ptr;
    501725    xptr_t          process_root_xp;
     
    507731    xptr_t          remote_gpt_xp;
    508732
    509     pid_t           pid;
    510     cxy_t           owner_cxy;
    511     lpid_t          owner_lpid;
    512 
    513 #if DEBUG_VMM_UPDATE_PTE
     733#if DEBUG_VMM_GLOBAL_UPDATE_PTE
    514734uint32_t cycle = (uint32_t)hal_get_cycles();
    515735thread_t * this = CURRENT_THREAD;
    516 if( DEBUG_VMM_UPDATE_PTE < cycle )
     736#endif
     737
     738
     739#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
     740if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
     517741printk("\n[%s] thread[%x,%x] enter for process %x / vpn %x / attr %x / ppn %x / cycle %d\n",
    518742__FUNCTION__, this->process->pid, this->trdid, process->pid, vpn, attr, ppn, cycle );
    519743#endif
    520744
    521     // get extended pointer on root of process copies xlist in owner cluster
     745    // get owner process cluster and local index
    522746    pid              = process->pid;
    523747    owner_cxy        = CXY_FROM_PID( pid );
    524748    owner_lpid       = LPID_FROM_PID( pid );
     749
     750    // get extended pointer on root of process copies xlist in owner cluster
    525751    process_root_ptr = &LOCAL_CLUSTER->pmgr.copies_root[owner_lpid];
    526752    process_root_xp  = XPTR( owner_cxy , process_root_ptr );
    527753
    528 // check local cluster is owner cluster
    529 assert( (owner_cxy == local_cxy) , "must be called in owner cluster\n");
    530 
    531     // loop on destination process copies
     754    // loop on process copies
    532755    XLIST_FOREACH( process_root_xp , process_iter_xp )
    533756    {
     
    537760        remote_process_cxy = GET_CXY( remote_process_xp );
    538761
    539 #if (DEBUG_VMM_UPDATE_PTE & 1)
    540 if( DEBUG_VMM_UPDATE_PTE < cycle )
     762#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
     763if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
    541764printk("\n[%s] thread[%x,%x] handling vpn %x for process %x in cluster %x\n",
    542765__FUNCTION__, this->process->pid, this->trdid, vpn, process->pid, remote_process_cxy );
     
    550773    } 
    551774
    552 #if DEBUG_VMM_UPDATE_PTE
     775#if DEBUG_VMM_GLOBAL_UPDATE_PTE
    553776cycle = (uint32_t)hal_get_cycles();
    554 if( DEBUG_VMM_UPDATE_PTE < cycle )
     777if( DEBUG_VMM_GLOBAL_UPDATE_PTE < cycle )
    555778printk("\n[%s] thread[%x,%x] exit for process %x / vpn %x / cycle %d\n",
    556779__FUNCTION__, this->process->pid, this->trdid, process->pid , vpn , cycle );
    557780#endif
    558781
    559 #if (DEBUG_VMM_UPDATE_PTE & 1)
     782#if (DEBUG_VMM_GLOBAL_UPDATE_PTE & 1)
    560783hal_vmm_display( process , true );
    561784#endif
     
    772995    parent_lock_xp = XPTR( parent_cxy , &parent_vmm->vsl_lock );
    773996
    774     // take the lock protecting the parent VSL in read mode
    775     remote_rwlock_rd_acquire( parent_lock_xp );
     997    // take the lock protecting the parent VSL
     998    remote_queuelock_acquire( parent_lock_xp );
    776999
    7771000    // loop on parent VSL xlist
     
    8091032            vseg_init_from_ref( child_vseg , parent_vseg_xp );
    8101033
    811             // build extended pointer on VSL lock
    812             xptr_t lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
     1034            // build extended pointer on child VSL lock
     1035            xptr_t child_lock_xp = XPTR( local_cxy , &child_vmm->vsl_lock );
    8131036 
    814             // take the VSL lock in write mode
    815             remote_rwlock_wr_acquire( lock_xp );
     1037            // take the child VSL lock
     1038            remote_queuelock_acquire( child_lock_xp );
    8161039
    8171040            // register child vseg in child VSL
    8181041            vmm_attach_vseg_to_vsl( child_vmm , child_vseg );
    8191042
    820             // release the VSL lock
    821             remote_rwlock_wr_release( lock_xp );
     1043            // release the child VSL lock
     1044            remote_queuelock_release( child_lock_xp );
    8221045
    8231046#if DEBUG_VMM_FORK_COPY
     
    8661089
    8671090    // release the parent VSL lock in read mode
    868     remote_rwlock_rd_release( parent_lock_xp );
     1091    remote_queuelock_release( parent_lock_xp );
    8691092
    8701093    // initialize the child VMM STACK allocator
     
    9391162
    9401163    // take the VSL lock
    941     remote_rwlock_wr_acquire( vsl_lock_xp );
     1164    remote_queuelock_acquire( vsl_lock_xp );
    9421165
    9431166    // scan the VSL to delete all registered vsegs
     
    9681191
    9691192    // release the VSL lock
    970     remote_rwlock_wr_release( vsl_lock_xp );
     1193    remote_queuelock_release( vsl_lock_xp );
    9711194
    9721195    // remove all registered MMAP vsegs
     
    10421265
    10431266}  // end vmm_check_conflict()
    1044 
    1045 
    10461267
    10471268////////////////////////////////////////////////
     
    10601281        error_t      error;
    10611282
     1283#if DEBUG_VMM_CREATE_VSEG
     1284thread_t * this  = CURRENT_THREAD;
     1285uint32_t   cycle;
     1286#endif
     1287
    10621288#if (DEBUG_VMM_CREATE_VSEG & 1)
    1063 thread_t * this  = CURRENT_THREAD;
    1064 uint32_t   cycle = (uint32_t)hal_get_cycles();
     1289cycle = (uint32_t)hal_get_cycles();
    10651290if( DEBUG_VMM_CREATE_VSEG < cycle )
    10661291printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cxy %x / cycle %d\n",
     
    11801405 
    11811406    // take the VSL lock in write mode
    1182     remote_rwlock_wr_acquire( lock_xp );
     1407    remote_queuelock_acquire( lock_xp );
    11831408
    11841409    // attach vseg to VSL
     
    11861411
    11871412    // release the VSL lock
    1188     remote_rwlock_wr_release( lock_xp );
     1413    remote_queuelock_release( lock_xp );
    11891414
    11901415#if DEBUG_VMM_CREATE_VSEG
    11911416cycle = (uint32_t)hal_get_cycles();
    1192 if( DEBUG_VMM_CREATE_VSEG < cycle )
      1417if( DEBUG_VMM_CREATE_VSEG < cycle )
    11931419printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cxy %x / cycle %d\n",
    11941420__FUNCTION__, this->process->pid, this->trdid,
     
    12001426}  // vmm_create_vseg()
    12011427
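
For reference, a hedged usage sketch of vmm_create_vseg() for an anonymous vseg follows,
based on the argument list used elsewhere in this file; the XPTR_NULL mapper value and the
zero file fields are assumptions for the ANON type.

    // hedged sketch : create a private anonymous vseg mapped in the local cluster
    vseg_t * vseg = vmm_create_vseg( process,          // target process descriptor
                                     VSEG_TYPE_ANON,   // anonymous private data
                                     base,             // requested base address
                                     size,             // requested size (bytes)
                                     0,                // file_offset : unused for ANON
                                     0,                // file_size   : unused for ANON
                                     XPTR_NULL,        // mapper_xp   : unused for ANON
                                     local_cxy );      // cluster for physical mapping
    if( vseg == NULL ) printk("\n[ERROR] in %s : cannot create ANON vseg\n", __FUNCTION__ );
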
     1428////////////////////////////////////////////////////////////////////////////////////////////
     1429// This static function is called by the vmm_remove_vseg() and vmm_resize_vseg() functions.
      1430// Depending on the vseg <type>, it decrements the physical page refcount, and
      1431// conditionally releases the physical page identified by <ppn> to the relevant kmem.
     1432////////////////////////////////////////////////////////////////////////////////////////////
     1433// @ process  : local pointer on process.
     1434// @ vseg     : local pointer on vseg.
      1435// @ ppn      : released physical page index.
     1436////////////////////////////////////////////////////////////////////////////////////////////
     1437static void vmm_ppn_release( process_t * process,
     1438                             vseg_t    * vseg,
     1439                             ppn_t       ppn )
     1440{
     1441    bool_t do_release;
     1442
     1443    // get vseg type
     1444    vseg_type_t type = vseg->type;
     1445
     1446    // compute is_ref
     1447    bool_t is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
     1448
     1449    // get pointers on physical page descriptor
     1450    xptr_t   page_xp  = ppm_ppn2page( ppn );
     1451    cxy_t    page_cxy = GET_CXY( page_xp );
     1452    page_t * page_ptr = GET_PTR( page_xp );
     1453
     1454    // decrement page refcount
     1455    xptr_t count_xp = XPTR( page_cxy , &page_ptr->refcount );
     1456    hal_remote_atomic_add( count_xp , -1 );
     1457
     1458    // compute the do_release condition depending on vseg type
     1459    if( (type == VSEG_TYPE_FILE)  ||
     1460        (type == VSEG_TYPE_KCODE) ||
     1461        (type == VSEG_TYPE_KDATA) ||
     1462        (type == VSEG_TYPE_KDEV) )           
     1463    {
     1464        // no physical page release for FILE and KERNEL
     1465        do_release = false;
     1466    }
     1467    else if( (type == VSEG_TYPE_CODE)  ||
     1468             (type == VSEG_TYPE_STACK) )
     1469    {
     1470        // always release physical page for private vsegs
     1471        do_release = true;
     1472    }
     1473    else if( (type == VSEG_TYPE_ANON)  ||
     1474             (type == VSEG_TYPE_REMOTE) )
     1475    {
     1476        // release physical page if reference cluster
     1477        do_release = is_ref;
     1478    }
     1479    else if( is_ref )  // vseg_type == DATA in reference cluster
     1480    {
     1481        // get extended pointers on forks and lock field in page descriptor
     1482        xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
     1483        xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
     1484
     1485        // take lock protecting "forks" counter
     1486        remote_busylock_acquire( lock_xp );
     1487
     1488        // get number of pending forks from page descriptor
     1489        uint32_t forks = hal_remote_l32( forks_xp );
     1490
     1491        // decrement pending forks counter if required
     1492        if( forks )  hal_remote_atomic_add( forks_xp , -1 );
     1493
     1494        // release lock protecting "forks" counter
     1495        remote_busylock_release( lock_xp );
     1496
     1497        // release physical page if forks == 0
     1498        do_release = (forks == 0);
     1499    }
     1500    else              // vseg_type == DATA not in reference cluster
     1501    {
     1502        // no physical page release if not in reference cluster
     1503        do_release = false;
     1504    }
     1505
     1506    // release physical page to relevant kmem when required
     1507    if( do_release )
     1508    {
     1509        ppm_remote_free_pages( page_cxy , page_ptr );
     1510
     1511#if DEBUG_VMM_PPN_RELEASE
     1512thread_t * this = CURRENT_THREAD;
      1513if( DEBUG_VMM_PPN_RELEASE < (uint32_t)hal_get_cycles() )
     1514printk("\n[%s] thread[%x,%x] released ppn %x to kmem\n",
     1515__FUNCTION__, this->process->pid, this->trdid, ppn );
     1516#endif
     1517
     1518    }
     1519} // end vmm_ppn_release()
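
Both vmm_remove_vseg() and vmm_resize_vseg() below repeat the same unmap-then-release step
for every mapped PTE of the target range. The helper below only illustrates that shared
pattern; it is a sketch, not part of the changeset.

    // hedged sketch : per-page pattern shared by vmm_remove_vseg() and vmm_resize_vseg()
    static void vmm_unmap_one_vpn_sketch( process_t * process,
                                          vseg_t    * vseg,
                                          xptr_t      gpt_xp,
                                          vpn_t       vpn )
    {
        uint32_t attr;
        ppn_t    ppn;

        // get current PTE attributes and physical page index
        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );

        if( attr & GPT_MAPPED )
        {
            // unmap the PTE in the local GPT
            hal_gpt_reset_pte( gpt_xp , vpn );

            // decrement refcount and conditionally release the physical page
            vmm_ppn_release( process , vseg , ppn );
        }
    }
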
    12021520
    12031521//////////////////////////////////////////
     
    12051523                      vseg_t    * vseg )
    12061524{
    1207     vmm_t     * vmm;        // local pointer on process VMM
    1208     xptr_t      gpt_xp;     // extended pointer on GPT
    1209     bool_t      is_ref;     // local process is reference process
    12101525    uint32_t    vseg_type;  // vseg type
    12111526    vpn_t       vpn;        // VPN of current PTE
     
    12141529    ppn_t       ppn;        // current PTE ppn value
    12151530    uint32_t    attr;       // current PTE attributes
    1216     xptr_t      page_xp;    // extended pointer on page descriptor
    1217     cxy_t       page_cxy;   // page descriptor cluster
    1218     page_t    * page_ptr;   // page descriptor pointer
    1219     xptr_t      count_xp;   // extended pointer on page refcount
    12201531
    12211532// check arguments
     
    12231534assert( (vseg    != NULL), "vseg argument is NULL" );
    12241535
    1225     // compute is_ref
    1226     is_ref = (GET_CXY( process->ref_xp ) == local_cxy);
    1227 
    12281536    // get pointers on local process VMM
    1229     vmm = &process->vmm;
     1537    vmm_t * vmm = &process->vmm;
    12301538
    12311539    // build extended pointer on GPT
    1232     gpt_xp = XPTR( local_cxy , &vmm->gpt );
     1540    xptr_t gpt_xp = XPTR( local_cxy , &vmm->gpt );
    12331541
    12341542    // get relevant vseg infos
     
    12401548uint32_t   cycle = (uint32_t)hal_get_cycles();
    12411549thread_t * this  = CURRENT_THREAD;
     1550#endif
     1551
     1552#if (DEBUG_VMM_REMOVE_VSEG & 1 )
    12421553if( DEBUG_VMM_REMOVE_VSEG < cycle )
    12431554printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
     
    12461557#endif
    12471558
    1248     // loop on PTEs in GPT
     1559    // loop on PTEs in GPT to unmap all mapped PTE
    12491560        for( vpn = vpn_min ; vpn < vpn_max ; vpn++ )
    12501561    {
     
    12571568#if( DEBUG_VMM_REMOVE_VSEG & 1 )
    12581569if( DEBUG_VMM_REMOVE_VSEG < cycle )
    1259 printk("- unmap vpn %x / ppn %x / %s" , vpn , ppn, vseg_type_str(vseg_type) );
      1570printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
     1571__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg_type) );
    12601572#endif
    12611573            // unmap GPT entry in local GPT
    12621574            hal_gpt_reset_pte( gpt_xp , vpn );
    12631575
    1264             // get pointers on physical page descriptor
    1265             page_xp  = ppm_ppn2page( ppn );
    1266             page_cxy = GET_CXY( page_xp );
    1267             page_ptr = GET_PTR( page_xp );
    1268 
    1269             // decrement page refcount
    1270             count_xp = XPTR( page_cxy , &page_ptr->refcount );
    1271             hal_remote_atomic_add( count_xp , -1 );
    1272 
    1273             // compute the ppn_release condition depending on vseg type
    1274             bool_t ppn_release;
    1275             if( (vseg_type == VSEG_TYPE_FILE)  ||
    1276                 (vseg_type == VSEG_TYPE_KCODE) ||
    1277                 (vseg_type == VSEG_TYPE_KDATA) ||
    1278                 (vseg_type == VSEG_TYPE_KDEV) )           
    1279             {
    1280                 // no physical page release for FILE and KERNEL
    1281                 ppn_release = false;
    1282             }
    1283             else if( (vseg_type == VSEG_TYPE_CODE)  ||
    1284                      (vseg_type == VSEG_TYPE_STACK) )
    1285             {
    1286                 // always release physical page for private vsegs
    1287                 ppn_release = true;
    1288             }
    1289             else if( (vseg_type == VSEG_TYPE_ANON)  ||
    1290                      (vseg_type == VSEG_TYPE_REMOTE) )
    1291             {
    1292                 // release physical page if reference cluster
    1293                 ppn_release = is_ref;
    1294             }
    1295             else if( is_ref )  // vseg_type == DATA in reference cluster
    1296             {
    1297                 // get extended pointers on forks and lock field in page descriptor
    1298                 xptr_t forks_xp = XPTR( page_cxy , &page_ptr->forks );
    1299                 xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    1300 
    1301                 // take lock protecting "forks" counter
    1302                 remote_busylock_acquire( lock_xp );
    1303 
    1304                 // get number of pending forks from page descriptor
    1305                 uint32_t forks = hal_remote_l32( forks_xp );
    1306 
    1307                 // decrement pending forks counter if required
    1308                 if( forks )  hal_remote_atomic_add( forks_xp , -1 );
    1309 
    1310                 // release lock protecting "forks" counter
    1311                 remote_busylock_release( lock_xp );
    1312 
    1313                 // release physical page if forks == 0
    1314                 ppn_release = (forks == 0);
    1315             }
    1316             else              // vseg_type == DATA not in reference cluster
    1317             {
    1318                 // no physical page release if not in reference cluster
    1319                 ppn_release = false;
    1320             }
    1321 
    1322             // release physical page to relevant kmem when required
    1323             if( ppn_release ) ppm_remote_free_pages( page_cxy , page_ptr );
    1324 
    1325 #if( DEBUG_VMM_REMOVE_VSEG & 1 )
    1326 if( DEBUG_VMM_REMOVE_VSEG < cycle )
    1327 {
    1328     if( ppn_release ) printk(" / released to kmem\n" );
    1329     else              printk("\n");
    1330 }
    1331 #endif
     1576            // release physical page when required
     1577            vmm_ppn_release( process , vseg , ppn );
    13321578        }
    13331579    }
     
    13681614}  // end vmm_remove_vseg()
    13691615
    1370 
    1371 ///////////////////////////////////
    1372 void vmm_delete_vseg( pid_t    pid,
    1373                       intptr_t vaddr )
     1616/////////////////////////////////////////////
     1617void vmm_resize_vseg( process_t * process,
     1618                      vseg_t    * vseg,
     1619                      intptr_t    new_base,
     1620                      intptr_t    new_size )
    13741621{
    1375     process_t * process;    // local pointer on local process
    1376     vseg_t    * vseg;       // local pointer on local vseg containing vaddr
    1377 
    1378     // get local pointer on local process descriptor
    1379     process = cluster_get_local_process_from_pid( pid );
    1380 
    1381     if( process == NULL )
    1382     {
    1383         printk("\n[WARNING] in %s : cannot get local process descriptor\n",
    1384         __FUNCTION__ );
    1385         return;
    1386     }
    1387 
    1388     // get local pointer on local vseg containing vaddr
    1389     vseg = vmm_vseg_from_vaddr( &process->vmm , vaddr );
    1390 
    1391     if( vseg == NULL )
    1392     {
    1393         printk("\n[WARNING] in %s : cannot get vseg descriptor\n",
    1394         __FUNCTION__ );
    1395         return;
    1396     }
    1397 
    1398     // call relevant function
    1399     vmm_remove_vseg( process , vseg );
    1400 
    1401 }  // end vmm_delete_vseg
    1402 
    1403 
    1404 /////////////////////////////////////////////
    1405 vseg_t * vmm_vseg_from_vaddr( vmm_t    * vmm,
    1406                               intptr_t   vaddr )
    1407 {
    1408     xptr_t   vseg_xp;
    1409     vseg_t * vseg;
    1410     xptr_t   iter_xp;
    1411 
    1412     // get extended pointers on VSL lock and root
    1413     xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    1414     xptr_t root_xp = XPTR( local_cxy , &vmm->vsegs_root );
    1415 
    1416     // get lock protecting the VSL
    1417     remote_rwlock_rd_acquire( lock_xp );
    1418 
    1419     // scan the list of vsegs in VSL
    1420     XLIST_FOREACH( root_xp , iter_xp )
    1421     {
    1422         // get pointers on vseg
    1423         vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
    1424         vseg    = GET_PTR( vseg_xp );
    1425 
    1426         // return success when match
    1427         if( (vaddr >= vseg->min) && (vaddr < vseg->max) )
    1428         {
    1429             // return success
    1430             remote_rwlock_rd_release( lock_xp );
    1431             return vseg;
    1432         }
    1433     }
    1434 
    1435     // return failure
    1436     remote_rwlock_rd_release( lock_xp );
    1437     return NULL;
    1438 
    1439 }  // end vmm_vseg_from_vaddr()
    1440 
    1441 /////////////////////////////////////////////
    1442 error_t vmm_resize_vseg( process_t * process,
    1443                          intptr_t    base,
    1444                          intptr_t    size )
    1445 {
    1446     error_t   error;
    1447     vseg_t  * new;
    1448     vpn_t     vpn_min;
    1449     vpn_t     vpn_max;
     1622    vpn_t     vpn;
     1623    ppn_t     ppn;
     1624    uint32_t  attr;
     1625
     1626// check arguments
     1627assert( (process != NULL), "process argument is NULL" );
     1628assert( (vseg    != NULL), "vseg argument is NULL" );
    14501629
    14511630#if DEBUG_VMM_RESIZE_VSEG
    14521631uint32_t   cycle = (uint32_t)hal_get_cycles();
    14531632thread_t * this  = CURRENT_THREAD;
     1633#endif
     1634
     1635#if (DEBUG_VMM_RESIZE_VSEG & 1)
    14541636if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1455 printk("\n[%s] thread[%x,%x] enter / process %x / base %x / size %d / cycle %d\n",
    1456 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
    1457 #endif
    1458 
    1459     // get pointer on process VMM
    1460     vmm_t * vmm = &process->vmm;
    1461 
    1462     intptr_t addr_min = base;
    1463         intptr_t addr_max = base + size;
    1464 
    1465     // get pointer on vseg
    1466         vseg_t * vseg = vmm_vseg_from_vaddr( vmm , base );
    1467 
    1468         if( vseg == NULL)
    1469     {
    1470         printk("\n[ERROR] in %s : vseg(%x,%d) not found\n",
    1471         __FUNCTION__, base , size );
    1472         return -1;
    1473     }
    1474 
    1475     // resize depends on unmapped region base and size
    1476         if( (vseg->min > addr_min) || (vseg->max < addr_max) )        // not included in vseg
    1477     {
    1478         printk("\n[ERROR] in %s : unmapped region[%x->%x[ not included in vseg[%x->%x[\n",
    1479         __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1480 
    1481         error = -1;
    1482     }
    1483         else if( (vseg->min == addr_min) && (vseg->max == addr_max) )  // vseg must be deleted
    1484     {
     1637printk("\n[%s] thread[%x,%x] enter / process %x / %s / base %x / cycle %d\n",
     1638__FUNCTION__, this->process->pid, this->trdid,
      1639process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1640#endif
     1641
     1642    // get existing vseg vpn_min and vpn_max
     1643    vpn_t     old_vpn_min = vseg->vpn_base;
     1644    vpn_t     old_vpn_max = old_vpn_min + vseg->vpn_size - 1;
     1645
     1646    // compute new vseg vpn_min & vpn_max 
     1647    intptr_t min          = new_base;
     1648    intptr_t max          = new_base + new_size;
     1649    vpn_t    new_vpn_min  = min >> CONFIG_PPM_PAGE_SHIFT;
     1650    vpn_t    new_vpn_max  = (max - 1) >> CONFIG_PPM_PAGE_SHIFT;
     1651
     1652    // build extended pointer on GPT
     1653    xptr_t gpt_xp = XPTR( local_cxy , &process->vmm.gpt );
     1654
      1655    // loop on PTEs in GPT to unmap PTE if (old_vpn_min <= vpn < new_vpn_min)
     1656        for( vpn = old_vpn_min ; vpn < new_vpn_min ; vpn++ )
     1657    {
     1658        // get ppn and attr
     1659        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
     1660
     1661        if( attr & GPT_MAPPED )  // PTE is mapped
     1662        {
    14851663
    14861664#if( DEBUG_VMM_RESIZE_VSEG & 1 )
    14871665if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1488 printk("\n[%s] unmapped region[%x->%x[ equal vseg[%x->%x[\n",
    1489 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1490 #endif
    1491         vmm_delete_vseg( process->pid , vseg->min );
    1492 
    1493 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
      1666printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
      1667__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
     1668#endif
     1669            // unmap GPT entry
     1670            hal_gpt_reset_pte( gpt_xp , vpn );
     1671
     1672            // release physical page when required
     1673            vmm_ppn_release( process , vseg , ppn );
     1674        }
     1675    }
     1676
      1677    // loop on PTEs in GPT to unmap PTE if (new_vpn_max < vpn <= old_vpn_max)
      1678        for( vpn = new_vpn_max + 1 ; vpn <= old_vpn_max ; vpn++ )
     1679    {
     1680        // get ppn and attr
     1681        hal_gpt_get_pte( gpt_xp , vpn , &attr , &ppn );
     1682
     1683        if( attr & GPT_MAPPED )  // PTE is mapped
     1684        {
     1685
      1686#if( DEBUG_VMM_RESIZE_VSEG & 1 )
    14941687if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1495 printk("\n[%s] thread[%x,%x] deleted vseg\n",
    1496 __FUNCTION__, this->process->pid, this->trdid );
    1497 #endif
    1498         error = 0;
    1499     }
    1500         else if( vseg->min == addr_min )                               // vseg must be resized
    1501     {
    1502 
    1503 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
      1688printk("\n[%s] thread[%x,%x] unmap vpn %x / ppn %x / %s\n",
      1689__FUNCTION__, this->process->pid, this->trdid, vpn , ppn, vseg_type_str(vseg->type) );
     1690#endif
     1691            // unmap GPT entry in local GPT
     1692            hal_gpt_reset_pte( gpt_xp , vpn );
     1693
     1694            // release physical page when required
     1695            vmm_ppn_release( process , vseg , ppn );
     1696        }
     1697    }
     1698
     1699    // resize vseg in VSL
     1700    vseg->min      = min;
     1701    vseg->max      = max;
     1702    vseg->vpn_base = new_vpn_min;
     1703    vseg->vpn_size = new_vpn_max - new_vpn_min + 1;
     1704
     1705#if DEBUG_VMM_RESIZE_VSEG
     1706cycle = (uint32_t)hal_get_cycles();
    15041707if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1505 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
    1506 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1507 #endif
    1508         // update vseg min address
    1509         vseg->min = addr_max;
    1510 
    1511         // update vpn_base and vpn_size
    1512         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
    1513         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1514         vseg->vpn_base = vpn_min;
    1515         vseg->vpn_size = vpn_max - vpn_min + 1;
    1516 
    1517 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1518 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1519 printk("\n[%s] thread[%x,%x] changed vseg_min\n",
    1520 __FUNCTION__, this->process->pid, this->trdid );
    1521 #endif
    1522         error = 0;
    1523     }
    1524         else if( vseg->max == addr_max )                              // vseg must be resized
    1525     {
    1526 
    1527 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1528 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1529 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
    1530 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1531 #endif
    1532         // update vseg max address
    1533         vseg->max = addr_min;
    1534 
    1535         // update vpn_base and vpn_size
    1536         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
    1537         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1538         vseg->vpn_base = vpn_min;
    1539         vseg->vpn_size = vpn_max - vpn_min + 1;
    1540 
    1541 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1542 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1543 printk("\n[%s] thread[%x,%x] changed vseg_max\n",
    1544 __FUNCTION__, this->process->pid, this->trdid );
    1545 #endif
    1546         error = 0;
    1547 
    1548     }
    1549     else                                                          // vseg cut in three regions
    1550     {
    1551 
    1552 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1553 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1554 printk("\n[%s] unmapped region[%x->%x[ included in vseg[%x->%x[\n",
    1555 __FUNCTION__, addr_min, addr_max, vseg->min, vseg->max );
    1556 #endif
    1557         // resize existing vseg
    1558         vseg->max = addr_min;
    1559 
    1560         // update vpn_base and vpn_size
    1561         vpn_min        = vseg->min >> CONFIG_PPM_PAGE_SHIFT;
    1562         vpn_max        = (vseg->max - 1) >> CONFIG_PPM_PAGE_SHIFT;
    1563         vseg->vpn_base = vpn_min;
    1564         vseg->vpn_size = vpn_max - vpn_min + 1;
    1565 
    1566         // create new vseg
    1567         new = vmm_create_vseg( process,
    1568                                vseg->type,
    1569                                addr_min,
    1570                                (vseg->max - addr_max),
    1571                                vseg->file_offset,
    1572                                vseg->file_size,
    1573                                vseg->mapper_xp,
    1574                                vseg->cxy );
    1575 
    1576 #if( DEBUG_VMM_RESIZE_VSEG & 1 )
    1577 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1578 printk("\n[%s] thread[%x,%x] replaced vseg by two smal vsegs\n",
    1579 __FUNCTION__, this->process->pid, this->trdid );
    1580 #endif
    1581 
    1582         if( new == NULL ) error = -1;
    1583         else              error = 0;
    1584     }
    1585 
    1586 #if DEBUG_VMM_RESIZE_VSEG
    1587 if( DEBUG_VMM_RESIZE_VSEG < cycle )
    1588 printk("\n[%s] thread[%x,%x] exit / process %x / base %x / size %d / cycle %d\n",
    1589 __FUNCTION__, this->process->pid, this->trdid, process->pid, base, size, cycle );
    1590 #endif
    1591 
    1592         return error;
    1593 
    1594 }  // vmm_resize_vseg()
      1708printk("\n[%s] thread[%x,%x] exit / process %x / %s / base %x / cycle %d\n",
     1709__FUNCTION__, this->process->pid, this->trdid,
     1710process->pid, vseg_type_str(vseg->type), vseg->min, cycle );
     1711#endif
     1712
     1713}  // end vmm_resize_vseg
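
As a concrete check of the vpn arithmetic above, assuming 4 Kbytes pages
(CONFIG_PPM_PAGE_SHIFT == 12), which is an assumption of this example:

    // hedged arithmetic example : resize to new_base = 0x10800 and new_size = 0x2000
    //   min         = 0x10800
    //   max         = 0x10800 + 0x2000    = 0x12800
    //   new_vpn_min = 0x10800 >> 12       = 0x10
    //   new_vpn_max = (0x12800 - 1) >> 12 = 0x12
    // the resized vseg keeps vpn 0x10 to 0x12, so vseg->vpn_size = 0x12 - 0x10 + 1 = 3
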
     1714
     1715/////////////////////////////////////////////////////////////////////////////////////////////
     1716// This static function is called twice by the vmm_get_vseg() function.
      1717// It scans the (possibly remote) VSL defined by the <vmm_xp> argument to find the vseg
      1718// containing a given virtual address <vaddr>. It uses remote accesses when the target
      1719// VSL is not local. The lock protecting the VSL must be taken by the caller.
     1720/////////////////////////////////////////////////////////////////////////////////////////////
     1721// @ vmm_xp  : extended pointer on the process VMM.
     1722// @ vaddr   : virtual address.
     1723// @ return local pointer on remote vseg if success / return NULL if not found.
     1724/////////////////////////////////////////////////////////////////////////////////////////////
     1725static vseg_t * vmm_vseg_from_vaddr( xptr_t     vmm_xp,
     1726                                     intptr_t   vaddr )
     1727{
     1728    xptr_t   iter_xp;
     1729    xptr_t   vseg_xp;
     1730    vseg_t * vseg;
     1731    intptr_t min;
     1732    intptr_t max;
     1733
     1734    // get cluster and local pointer on target VMM
     1735    vmm_t * vmm_ptr = GET_PTR( vmm_xp );
     1736    cxy_t   vmm_cxy = GET_CXY( vmm_xp );
     1737
     1738    // build extended pointer on VSL root
     1739    xptr_t root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root );
     1740
     1741    // scan the list of vsegs in VSL
     1742    XLIST_FOREACH( root_xp , iter_xp )
     1743    {
     1744        vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
     1745        vseg    = GET_PTR( vseg_xp );
     1746
     1747        min = hal_remote_l32( XPTR( vmm_cxy , &vseg->min ) );
     1748        max = hal_remote_l32( XPTR( vmm_cxy , &vseg->max ) );
     1749
     1750        // return success when match
     1751        if( (vaddr >= min) && (vaddr < max) ) return vseg;
     1752    }
     1753
     1754    // return failure
     1755    return NULL;
     1756
     1757}  // end vmm_vseg_from_vaddr()
    15951758
    15961759///////////////////////////////////////////
     
    15991762                       vseg_t   ** found_vseg )
    16001763{
    1601     xptr_t    vseg_xp;
    1602     vseg_t  * vseg;
    1603     vmm_t   * vmm;
    1604     error_t   error;
    1605 
    1606     // get pointer on local VMM
    1607     vmm = &process->vmm;
     1764    xptr_t    loc_lock_xp;     // extended pointer on local VSL lock
     1765    xptr_t    ref_lock_xp;     // extended pointer on reference VSL lock
     1766    vseg_t  * loc_vseg;        // local pointer on local vseg
     1767    vseg_t  * ref_vseg;        // local pointer on reference vseg
     1768
     1769    // build extended pointer on local VSL lock
     1770    loc_lock_xp = XPTR( local_cxy , &process->vmm.vsl_lock );
     1771     
     1772    // get local VSL lock
     1773    remote_queuelock_acquire( loc_lock_xp );
    16081774
    16091775    // try to get vseg from local VMM
    1610     vseg = vmm_vseg_from_vaddr( vmm , vaddr );
    1611 
    1612     if( vseg == NULL )   // vseg not found in local cluster => try to get it from ref
    1613         {
     1776    loc_vseg = vmm_vseg_from_vaddr( XPTR( local_cxy, &process->vmm ) , vaddr );
     1777
     1778    if (loc_vseg == NULL)   // vseg not found => access reference VSL
     1779    {
    16141780        // get extended pointer on reference process
    16151781        xptr_t ref_xp = process->ref_xp;
    16161782
    1617         // get cluster and local pointer on reference process 
     1783        // get cluster and local pointer on reference process
    16181784        cxy_t       ref_cxy = GET_CXY( ref_xp );
    16191785        process_t * ref_ptr = GET_PTR( ref_xp );
    16201786
    1621         if( local_cxy == ref_cxy )  return -1;   // local cluster is the reference
    1622 
    1623         // get extended pointer on reference vseg
    1624         rpc_vmm_get_vseg_client( ref_cxy , ref_ptr , vaddr , &vseg_xp , &error );
    1625            
    1626         if( error )   return -1;                // vseg not found => illegal user vaddr
    1627        
    1628         // allocate a vseg in local cluster
    1629         vseg = vseg_alloc();
    1630 
    1631         if( vseg == NULL ) return -1;           // cannot allocate a local vseg
    1632 
    1633         // initialise local vseg from reference
    1634         vseg_init_from_ref( vseg , vseg_xp );
    1635 
    1636         // build extended pointer on VSL lock
    1637         xptr_t lock_xp = XPTR( local_cxy , &vmm->vsl_lock );
    1638  
    1639         // take the VSL lock in write mode
    1640         remote_rwlock_wr_acquire( lock_xp );
    1641 
    1642         // register local vseg in local VSL
    1643         vmm_attach_vseg_to_vsl( vmm , vseg );
    1644  
    1645         // release the VSL lock
    1646         remote_rwlock_wr_release( lock_xp );
    1647     }   
    1648 
    1649     // success
    1650     *found_vseg = vseg;
    1651     return 0;
    1652 
     1787        // build extended pointer on reference VSL lock
     1788        ref_lock_xp = XPTR( ref_cxy , &ref_ptr->vmm.vsl_lock );
     1789     
     1790        // get reference VSL lock
     1791        remote_queuelock_acquire( ref_lock_xp );
     1792
     1793        // try to get vseg from reference VMM
     1794        ref_vseg = vmm_vseg_from_vaddr( XPTR( ref_cxy , &ref_ptr->vmm ) , vaddr );
     1795
     1796        if( ref_vseg == NULL )  // vseg not found => return error
     1797        {
     1798            printk("\n[ERROR] in %s : vaddr %x in process %x out of segment\n",
     1799            __FUNCTION__, vaddr, process->pid );
     1800
      1801            // release reference VSL & local VSL locks
      1802            remote_queuelock_release( ref_lock_xp );
      1803            remote_queuelock_release( loc_lock_xp );
      1804            return -1;
     1805        }
     1806        else                    // vseg found => try to update local VSL
     1807        {
     1808            // allocate a local vseg descriptor
     1809            loc_vseg = vseg_alloc();
     1810
     1811            if( loc_vseg == NULL )   // no memory => return error
     1812            {
     1813                printk("\n[ERROR] in %s : vaddr %x in process %x / no memory for local vseg\n",
     1814                __FUNCTION__, vaddr, process->pid );
     1815
     1816                // release reference VSL & local VSL locks
     1817                remote_queuelock_release( ref_lock_xp );
     1818                remote_queuelock_release( loc_lock_xp );
     1819
     1820                return -1;
     1821            }
     1822            else                     // update local VSL and return success
     1823            {
     1824                // initialize local vseg
     1825                vseg_init_from_ref( loc_vseg , XPTR( ref_cxy , ref_vseg ) );
     1826
     1827                // register local vseg in local VSL
     1828                vmm_attach_vseg_to_vsl( &process->vmm , loc_vseg );
     1829
     1830                // release reference VSL & local VSL locks
     1831                remote_queuelock_release( ref_lock_xp );
     1832                remote_queuelock_release( loc_lock_xp );
     1833
     1834                *found_vseg = loc_vseg;
     1835                return 0;
     1836            }
     1837        }
     1838    }
     1839    else                        // vseg found in local VSL => return success
     1840    {
     1841        // release local VSL lock
     1842        remote_queuelock_release( loc_lock_xp );
     1843
     1844        *found_vseg = loc_vseg;
     1845        return 0;
     1846    }
    16531847}  // end vmm_get_vseg()
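
Consistent with the removal of RPCs from page-fault handling, vmm_get_vseg() now walks the
reference VSL directly. A hedged sketch of its use from a page-fault path follows; the
bad_vaddr variable and the error policy are illustrative assumptions.

    // hedged sketch : checking a faulty virtual address in a page-fault handler
    vseg_t * vseg;
    error_t  error = vmm_get_vseg( process , (intptr_t)bad_vaddr , &vseg );
    if( error )
    {
        // vaddr not covered by any vseg => user error (segmentation fault)
        printk("\n[ERROR] in %s : illegal vaddr %x in process %x\n",
        __FUNCTION__, bad_vaddr, process->pid );
    }
    else
    {
        // vseg found (and registered in the local VSL if it was missing) :
        // the handler can now allocate and map the missing physical page
    }
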
    16541848
     
    16581852// pointer on the allocated page descriptor.
    16591853// The vseg cannot have the FILE type.
     1854//////////////////////////////////////////////////////////////////////////////////////
     1855// @ vseg   : local pointer on vseg.
     1856// @ vpn    : unmapped vpn.
     1857// @ return an extended pointer on the allocated page
    16601858//////////////////////////////////////////////////////////////////////////////////////
    16611859static xptr_t vmm_page_allocate( vseg_t * vseg,
     
    21942392#if DEBUG_VMM_HANDLE_COW
    21952393uint32_t   cycle = (uint32_t)hal_get_cycles();
    2196 if( DEBUG_VMM_HANDLE_COW < cycle )
     2394if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    21972395printk("\n[%s] thread[%x,%x] enter for vpn %x / core[%x,%d] / cycle %d\n",
    21982396__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     
    22002398
    22012399#if ((DEBUG_VMM_HANDLE_COW & 3) == 3 )
    2202 hal_vmm_display( process , true );
     2400hal_vmm_display( XPTR( local_cxy , process ) , true );
    22032401#endif
    22042402
     
    22162414
    22172415#if DEBUG_VMM_HANDLE_COW
    2218 if( DEBUG_VMM_HANDLE_COW < cycle )
     2416if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    22192417printk("\n[%s] thread[%x,%x] get vseg %s\n",
    22202418__FUNCTION__, this->process->pid, this->trdid, vseg_type_str(vseg->type) );
     
    22562454
    22572455#if DEBUG_VMM_HANDLE_COW
    2258 if( DEBUG_VMM_HANDLE_COW < cycle )
     2456if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    22592457printk("\n[%s] thread[%x,%x] get pte for vpn %x : ppn %x / attr %x\n",
    22602458__FUNCTION__, this->process->pid, this->trdid, vpn, old_ppn, old_attr );
     
    22852483
    22862484#if DEBUG_VMM_HANDLE_COW
    2287 if( DEBUG_VMM_HANDLE_COW < cycle )
     2485if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    22882486printk("\n[%s] thread[%x,%x] get forks = %d for vpn %x\n",
    22892487__FUNCTION__, this->process->pid, this->trdid, forks, vpn );
     
    23152513
    23162514#if DEBUG_VMM_HANDLE_COW
    2317 if( DEBUG_VMM_HANDLE_COW < cycle )
     2515if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23182516printk("\n[%s] thread[%x,%x] get new ppn %x for vpn %x\n",
    23192517__FUNCTION__, this->process->pid, this->trdid, new_ppn, vpn );
     
    23262524
    23272525#if DEBUG_VMM_HANDLE_COW
    2328 if( DEBUG_VMM_HANDLE_COW < cycle )
     2526if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23292527printk("\n[%s] thread[%x,%x] copied old page to new page\n",
    23302528__FUNCTION__, this->process->pid, this->trdid );
     
    23382536
    23392537#if(DEBUG_VMM_HANDLE_COW & 1)
    2340 if( DEBUG_VMM_HANDLE_COW < cycle )
     2538if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23412539printk("\n[%s] thread[%x,%x] no pending forks / keep existing PPN %x\n",
    23422540__FUNCTION__, this->process->pid, this->trdid, old_ppn );
     
    23492547
    23502548#if(DEBUG_VMM_HANDLE_COW & 1)
    2351 if( DEBUG_VMM_HANDLE_COW < cycle )
     2549if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23522550printk("\n[%s] thread[%x,%x] new_attr %x / new_ppn %x\n",
    23532551__FUNCTION__, this->process->pid, this->trdid, new_attr, new_ppn );
     
    23672565    else
    23682566    {
    2369         if( ref_cxy == local_cxy )                  // reference cluster is local
    2370         {
    2371             vmm_global_update_pte( process,
    2372                                    vpn,
    2373                                    new_attr,
    2374                                    new_ppn );
    2375         }
    2376         else                                        // reference cluster is remote
    2377         {
    2378             rpc_vmm_global_update_pte_client( ref_cxy,
    2379                                               ref_ptr,
    2380                                               vpn,
    2381                                               new_attr,
    2382                                               new_ppn );
    2383         }
     2567        // set new PTE in all GPT copies
     2568        vmm_global_update_pte( process,
     2569                               vpn,
     2570                               new_attr,
     2571                               new_ppn );
    23842572    }
    23852573
    23862574#if DEBUG_VMM_HANDLE_COW
    23872575cycle = (uint32_t)hal_get_cycles();
    2388 if( DEBUG_VMM_HANDLE_COW < cycle )
     2576if( (DEBUG_VMM_HANDLE_COW < cycle) && (vpn > 0) )
    23892577printk("\n[%s] thread[%x,%x] exit for vpn %x / core[%x,%d] / cycle %d\n",
    23902578__FUNCTION__, this->process->pid, this->trdid, vpn, local_cxy, this->core->lid, cycle );
     
    23922580
    23932581#if ((DEBUG_VMM_HANDLE_COW & 3) == 3)
    2394 hal_vmm_display( process , true );
     2582hal_vmm_display( XPTR( local_cxy , process ) , true );
    23952583#endif
    23962584
  • trunk/kernel/mm/vmm.h

    r635 r640  
    112112typedef struct vmm_s
    113113{
    114         remote_rwlock_t  vsl_lock;            /*! lock protecting the local VSL                 */
    115         xlist_entry_t    vsegs_root;          /*! Virtual Segment List (complete in reference)  */
    116         uint32_t         vsegs_nr;            /*! total number of local vsegs                   */
    117 
    118     gpt_t            gpt;                 /*! Generic Page Table (complete in reference)    */
    119 
    120     stack_mgr_t      stack_mgr;           /*! embedded STACK vsegs allocator                */
    121     mmap_mgr_t       mmap_mgr;            /*! embedded MMAP vsegs allocator                 */
    122 
    123         uint32_t         false_pgfault_nr;    /*! false page fault counter (for all threads)    */
    124         uint32_t         local_pgfault_nr;    /*! false page fault counter (for all threads)    */
    125         uint32_t         global_pgfault_nr;   /*! false page fault counter (for all threads)    */
    126     uint32_t         false_pgfault_cost;  /*! cumulated cost (for all threads)              */
    127     uint32_t         local_pgfault_cost;  /*! cumulated cost (for all threads)              */
    128     uint32_t         global_pgfault_cost; /*! cumulated cost (for all threads)              */
    129 
    130     vpn_t            args_vpn_base;       /*! args vseg first page                          */
    131     vpn_t            envs_vpn_base;       /*! envs vseg first page                          */
    132         vpn_t            code_vpn_base;       /*! code vseg first page                          */
    133         vpn_t            data_vpn_base;       /*! data vseg first page                          */
    134     vpn_t            heap_vpn_base;       /*! heap zone first page                          */
    135 
    136         intptr_t         entry_point;         /*! main thread entry point                       */
     114        remote_queuelock_t vsl_lock;            /*! lock protecting the local VSL               */
     115        xlist_entry_t      vsegs_root;          /*! Virtual Segment List root                   */
     116        uint32_t           vsegs_nr;            /*! total number of local vsegs                 */
     117
     118    gpt_t              gpt;                 /*! Generic Page Table descriptor               */
     119
     120    stack_mgr_t        stack_mgr;           /*! embedded STACK vsegs allocator              */
     121    mmap_mgr_t         mmap_mgr;            /*! embedded MMAP vsegs allocator               */
     122
     123        uint32_t           false_pgfault_nr;    /*! false page fault counter (for all threads)  */
      124        uint32_t           local_pgfault_nr;    /*! local page fault counter (for all threads)  */
      125        uint32_t           global_pgfault_nr;   /*! global page fault counter (for all threads) */
     126    uint32_t           false_pgfault_cost;  /*! cumulated cost (for all threads)            */
     127    uint32_t           local_pgfault_cost;  /*! cumulated cost (for all threads)            */
     128    uint32_t           global_pgfault_cost; /*! cumulated cost (for all threads)            */
     129
     130    vpn_t              args_vpn_base;       /*! args vseg first page                        */
     131    vpn_t              envs_vpn_base;       /*! envs vseg first page                        */
     132        vpn_t              code_vpn_base;       /*! code vseg first page                        */
     133        vpn_t              data_vpn_base;       /*! data vseg first page                        */
     134    vpn_t              heap_vpn_base;       /*! heap zone first page                        */
     135
     136        intptr_t           entry_point;         /*! main thread entry point                     */
    137137}
    138138vmm_t;
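
The vsl_lock field is now a remote_queuelock_t, taken in exclusive mode for both VSL lookups
and VSL updates. A minimal sketch of the resulting scan pattern, assuming vmm_xp is an
extended pointer on the target vmm_t:

    // hedged sketch : scanning a (possibly remote) VSL under the new queuelock
    vmm_t * vmm_ptr = GET_PTR( vmm_xp );
    cxy_t   vmm_cxy = GET_CXY( vmm_xp );
    xptr_t  lock_xp = XPTR( vmm_cxy , &vmm_ptr->vsl_lock );
    xptr_t  root_xp = XPTR( vmm_cxy , &vmm_ptr->vsegs_root );
    xptr_t  iter_xp;

    // take the VSL lock
    remote_queuelock_acquire( lock_xp );

    // scan the registered vsegs
    XLIST_FOREACH( root_xp , iter_xp )
    {
        xptr_t   vseg_xp = XLIST_ELEMENT( iter_xp , vseg_t , xlist );
        vseg_t * vseg    = GET_PTR( vseg_xp );

        // ... inspect or update vseg ...
    }

    // release the VSL lock
    remote_queuelock_release( lock_xp );
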
     
    143143 * - The GPT has been previously created, with the hal_gpt_create() function.
    144144 * - The "kernel" vsegs are previously registered, by the hal_vmm_kernel_update() function.
    145  * - The "code" and "data" vsegs are registered by the elf_load_process() function.
      145 * - The "code" and "data" vsegs are registered by the elf_load_process() function.
    146146 * - The "stack" vsegs are dynamically registered by the thread_user_create() function.
    147147 * - The "file", "anon", "remote" vsegs are dynamically registered by the mmap() syscall.
     
    206206
    207207/*********************************************************************************************
      208 * This function modifies, in all clusters containing a VSL copy, the base and size
      209 * of the vseg identified by the <process> and <base> arguments, as defined by the
      210 * <new_base> and <new_size> arguments. It is called by the sys_munmap() function,
      211 * and can be called by a thread running in any cluster, as it uses remote accesses.
      212 * It cannot fail, as only vsegs registered in VSL copies are updated.
     213 *********************************************************************************************
     214 * @ process   : local pointer on process descriptor.
     215 * @ base      : current vseg base address in user space.
     216 * @ new_base  : new vseg base.
     217 * @ new_size  : new vseg size.
     218 ********************************************************************************************/
     219void vmm_global_resize_vseg( struct process_s * process,
     220                             intptr_t           base,
     221                             intptr_t           new_base,
     222                             intptr_t           new_size );
     223
     224/*********************************************************************************************
      225 * This function removes the vseg identified by the <process> and <base> arguments from
      226 * the VSL, and removes all associated PTE entries from the GPT.
      227 * This is done in all clusters containing a VMM copy, to maintain VMM coherence.
      228 * This function can be called by a thread running in any cluster, as it uses
      229 * vmm_remove_vseg() in the local cluster, and the RPC_VMM_REMOVE_VSEG for remote clusters.
      230 * It cannot fail, as only vsegs registered in VSL copies are deleted.
     231 *********************************************************************************************
      232 * @ process  : local pointer on process descriptor.
     233 * @ base     : vseg base address in user space.
     234 ********************************************************************************************/
     235void vmm_global_delete_vseg( struct process_s * process,
     236                             intptr_t           base );
     237
     238/*********************************************************************************************
    208239 * This function modifies one GPT entry identified by the <process> and <vpn> arguments
    209  * in all clusters containing a process copy. It is used to maintain coherence in GPT
    210  * copies, using remote_write accesses.
    211  * It must be called by a thread running in the process owner cluster.
    212  * Use the RPC_VMM_GLOBAL_UPDATE_PTE if required.
     240 * in all clusters containing a process copy. It maintains coherence in GPT copies,
     241 * using remote_write accesses.
    213242 * It cannot fail, as only mapped PTE2 in GPT copies are updated.
    214243 *********************************************************************************************
     
    282311/*********************************************************************************************
    283312 * This function removes from the VMM of a process descriptor identified by the <process>
    284  * argument the vseg identified by the <vseg> argument. It can be used for any type of vseg.
    285  * As it uses local pointers, it must be called by a local thread.
    286  * It is called by the vmm_user_reset(), vmm_delete_vseg() and vmm_destroy() functions.
     313 * argument the vseg identified by the <vseg> argument. 
     314 * It is called by the vmm_user_reset(), vmm_global_delete_vseg() and vmm_destroy() functions.
     315 * It must be called by a local thread, running in the cluster containing the modified VMM.
     316 * Use the RPC_VMM_REMOVE_VSEG if required.
    287317 * It makes a kernel panic if the process is not registered in the local cluster,
    288318 * or if the vseg is not registered in the process VSL.
    289319 * For all vseg types, the vseg is detached from local VSL, and all associated PTEs are
    290320 * unmapped from local GPT. Other actions depend on the vseg type:
    291  * - Regarding the vseg descriptor release:
     321 * Regarding the vseg descriptor release:
    292322 *   . for ANON and REMOTE, the vseg is not released, but registered in local zombi_list.
    293323 *   . for STACK the vseg is released to the local stack allocator.
    294324 *   . for all other types, the vseg is released to the local kmem.
    295  * - Regarding the physical pages release:
     325 * Regarding the physical pages release:
    296326 *   . for KERNEL and FILE, the pages are not released to kmem.
    297327 *   . for CODE and STACK, the pages are released to local kmem when they are not COW.
    298328 *   . for DATA, ANON and REMOTE, the pages are released to relevant kmem only when
    299329 *     the local cluster is the reference cluster.
    300  * The lock protecting the VSL must be taken by the caller.
    301  *********************************************************************************************
    302  * @ process  : local pointer on process.
    303  * @ vseg     : local pointer on vseg.
     330 * The VSL lock protecting the VSL must be taken by the caller.
     331 *********************************************************************************************
     332 * @ process  : local pointer on process descriptor.
     333 * @ vseg     : local pointer on target vseg.
    304334 ********************************************************************************************/
    305335void vmm_remove_vseg( struct process_s * process,
     
    307337
    308338/*********************************************************************************************
    309  * This function call the vmm_remove vseg() function to remove from the VMM of a local
    310  * process descriptor, identified by the <pid> argument the vseg identified by the <vaddr>
    311  * virtual address in user space.
    312  * Use the RPC_VMM_DELETE_VSEG to remove a vseg from a remote process descriptor.
    313  *********************************************************************************************
    314  * @ pid      : process identifier.
    315  * @ vaddr    : virtual address in user space.
    316  ********************************************************************************************/
    317 void vmm_delete_vseg( pid_t    pid,
    318                       intptr_t vaddr );
    319 
    320 /*********************************************************************************************
    321  * This function removes a given region (defined by a base address and a size) from
    322  * the VMM of a given process descriptor. This can modify the number of vsegs:
    323  * (a) if the region is not entirely mapped in an existing vseg, it's an error.
    324  * (b) if the region has same base and size as an existing vseg, the vseg is removed.
    325  * (c) if the removed region cut the vseg in two parts, it is modified.
    326  * (d) if the removed region cut the vseg in three parts, it is modified, and a new
    327  *     vseg is created with same type.
    328  * FIXME [AG] this function should be called by a thread running in the reference cluster,
    329  *       and the VMM should be updated in all process descriptors copies.
    330  *********************************************************************************************
    331  * @ process   : pointer on process descriptor
    332  * @ base      : vseg base address
    333  * @ size      : vseg size (bytes)
    334  ********************************************************************************************/
    335 error_t vmm_resize_vseg( struct process_s * process,
    336                          intptr_t           base,
    337                          intptr_t           size );
    338 
    339 /*********************************************************************************************
    340  * This low-level function scan the local VSL in <vmm> to find the unique vseg containing
    341  * a given virtual address <vaddr>.
    342  * It is called by the vmm_get_vseg(), vmm_get_pte(), and vmm_resize_vseg() functions.
    343  *********************************************************************************************
    344  * @ vmm     : pointer on the process VMM.
    345  * @ vaddr   : virtual address.
    346  * @ return vseg pointer if success / return NULL if not found.
    347  ********************************************************************************************/
    348 struct vseg_s * vmm_vseg_from_vaddr( vmm_t    * vmm,
    349                                      intptr_t   vaddr );
    350 
    351 /*********************************************************************************************
    352  * This function checks that a given virtual address is contained in a registered vseg.
    353  * It can be called by any thread running in any cluster:
    354  * - if the vseg is registered in the local process VMM, it returns the local vseg pointer.
    355  * - if the vseg is missing in local VMM, it uses a RPC to get it from the reference cluster,
    356  *   register it in local VMM and returns the local vseg pointer, if success.
    357  * - it returns an user error if the vseg is missing in the reference VMM, or if there is
    358  *   not enough memory for a new vseg descriptor in the calling thread cluster.
    359  *********************************************************************************************
    360  * @ process   : [in] pointer on process descriptor
    361  * @ vaddr     : [in] virtual address
    362  * @ vseg      : [out] local pointer on local vseg
    363  * @ returns 0 if success / returns -1 if user error (out of segment).
      339 * This function resizes a local vseg identified by the <process> and <vseg> arguments.
      340 * It is called by the vmm_global_resize_vseg() function.
     341 * It must be called by a local thread, running in the cluster containing the modified VMM.
     342 * Use the RPC_VMM_RESIZE_VSEG if required.
     343 * It makes a kernel panic if the process is not registered in the local cluster,
     344 * or if the vseg is not registered in the process VSL.
      345 * The new vseg, defined by the <new_base> and <new_size> arguments, must be strictly
      346 * included in the target vseg. The target vseg base and size fields are updated in the VSL.
      347 * If the new vseg contains fewer pages than the target vseg, the relevant pages are
      348 * removed from the GPT.
      349 * The VSL lock must be taken by the caller.
     350 *********************************************************************************************
     351 * @ process   : local pointer on process descriptor
     352 * @ vseg      : local pointer on target vseg
     353 * @ new_base  : vseg base address
     354 * @ new_size  : vseg size (bytes)
     355 ********************************************************************************************/
     356void vmm_resize_vseg( struct process_s * process,
     357                      struct vseg_s    * vseg,
     358                      intptr_t           new_base,
     359                      intptr_t           new_size );
     360
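To illustrate the inclusion rule above, here is a minimal, hypothetical sketch (not part of the changeset) that trims the top of a vseg. It only uses the vseg min field already used elsewhere in this changeset and the prototype documented above; the VSL lock step is left as a comment because the lock primitive is not shown in this diff.

// Illustrative sketch only: shrink <vseg> so that it ends at <new_max>,
// keeping the same base. The new [base, base+size[ interval must remain
// included in the original vseg, as required by vmm_resize_vseg().
static void example_trim_vseg_top( struct process_s * process,
                                   struct vseg_s    * vseg,
                                   intptr_t           new_max )
{
    intptr_t new_base = vseg->min;              // base unchanged
    intptr_t new_size = new_max - vseg->min;    // only the size shrinks

    // ... take the VSL lock here (lock primitive not shown in this diff) ...

    // update the vseg base/size in the VSL and unmap the pages above new_max
    vmm_resize_vseg( process , vseg , new_base , new_size );

    // ... release the VSL lock here ...
}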
     361/*********************************************************************************************
     362 * This function checks that a given virtual address <vaddr> in a given <process> is
     363 * contained in a registered vseg. It can be called by any thread running in any cluster.
     364 * - if the vseg is registered in the local process VSL, it returns the local vseg pointer.
      365 * - if the vseg is missing in the local VSL, it directly accesses the reference VSL.
      366 * - if the vseg is found in the reference VSL, it updates the local VSL and returns this pointer.
     367 * It returns an error when the vseg is missing in the reference VMM, or when there is
     368 * not enough memory for a new vseg descriptor in the calling thread cluster.
     369 * For both the local and the reference VSL, it takes the VSL lock before scanning the VSL.
     370 *********************************************************************************************
     371 * @ process   : [in] pointer on process descriptor.
     372 * @ vaddr     : [in] virtual address.
     373 * @ vseg      : [out] local pointer on local vseg.
     374 * @ returns 0 if success / returns -1 if user error
    364375 ********************************************************************************************/
    365376error_t vmm_get_vseg( struct process_s  * process,
     
    395406 * This function is called by the generic exception handler in case of WRITE violation event,
    396407 * detected for a given <vpn>. The <process> argument is used to access the relevant VMM.
    397  * It returns a kernel panic if VPN is not in a registered vseg or is not mapped.
      408 * It makes a kernel panic if the faulty VPN is not in a registered vseg, or is not mapped.
    398409 * For a legal mapped vseg there are two cases:
    399410 * 1) If the missing VPN belongs to a private vseg (STACK), it accesses only the local GPT.
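For completeness, here is a short, hypothetical usage sketch of vmm_get_vseg(), not part of the changeset; it assumes the three-argument form documented above, with the local vseg pointer returned through the third argument.

// Illustrative sketch only: check that a faulty user address is covered by a
// registered vseg before trying to handle the fault.
static error_t example_check_vaddr( struct process_s * process,
                                    intptr_t           bad_vaddr )
{
    struct vseg_s * vseg;

    // scans the local VSL first, then the reference VSL if the vseg is missing
    error_t error = vmm_get_vseg( process , bad_vaddr , &vseg );

    if( error )  return -1;    // user error : address outside any registered vseg

    // here <vseg> is a local pointer on a vseg registered in the local VSL
    return 0;
}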
  • trunk/kernel/mm/vseg.h

    r625 r640  
    22 * vseg.h - virtual segment (vseg) related operations
    33 *
    4  * Authors   Ghassan Almaless (2008,2009,2010,2011, 2012)
    5  *           Mohamed Lamine Karaoui (2015)
    6  *           Alain Greiner (2016,2017,2018,2019)
     4 * Authors  Alain Greiner (2016,2017,2018,2019)
    75 *
    86 * Copyright (c) UPMC Sorbonne Universites
  • trunk/kernel/syscalls/sys_display.c

    r637 r640  
    134134        case DISPLAY_VMM:
    135135        {
    136             cxy_t cxy = (cxy_t)arg0;
    137             pid_t pid = (pid_t)arg1;
     136            cxy_t cxy      = (cxy_t)arg0;
     137            pid_t pid      = (pid_t)arg1;
     138            bool_t mapping = (arg2 != 0);
    138139
    139140            // check cxy argument
     
    163164
    164165            // call kernel function
    165                 hal_vmm_display( process_xp , true );
     166                hal_vmm_display( process_xp , mapping );
    166167
    167168            break;
     
    197198            }
    198199
    199             if( cxy == local_cxy )
    200             {
    201                     sched_display( lid );
    202             }
    203             else
    204             {
    205                 sched_remote_display( cxy , lid );
    206             }
     200            // call kernel function
     201            sched_remote_display( cxy , lid );
    207202
    208203            break;
  • trunk/kernel/syscalls/sys_munmap.c

    r635 r640  
    7272    }
    7373
      74    // compute unmapped region min and max
     75    intptr_t addr_min = (intptr_t)vaddr;
     76    intptr_t addr_max = addr_min + size;
     77
     78
     79    // get vseg min & max addresses
     80    intptr_t vseg_min = vseg->min;
     81    intptr_t vseg_max = vseg->max;
     82
    7483    // enable IRQs
    7584    hal_enable_irq( &save_sr );
    7685
    77     // call relevant kernel function
    78     error = vmm_resize_vseg( process , (intptr_t)vaddr , (intptr_t)size );
    79 
    80     if ( error )
     86    // action depend on both vseg and region bases & sizes
     87    if( (vseg_min > addr_min) || (vseg_max < addr_max) )   // region not included in vseg
    8188    {
    8289
    8390#if DEBUG_SYSCALLS_ERROR
    84 printk("\n[ERROR] in %s : cannot remove mapping\n", __FUNCTION__ );
      91printk("\n[ERROR] in %s : thread[%x,%x] / region[%x->%x] not included in vseg[%x->%x]\n",
      92__FUNCTION__, process->pid, this->trdid, addr_min, addr_max, vseg_min, vseg_max );
    8593#endif
    8694                this->errno = EINVAL;
    8795                return -1;
     96    }
      97    else if( (vseg_min == addr_min) && (vseg_max == addr_max) ) 
     98    {
     99
     100#if( DEBUG_SYS_MUNMAP & 1 )
     101if( DEBUG_SYS_MUNMAP < cycle )
     102printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg deleted\n",
     103__FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
     104#endif
     105        // delete existing vseg
     106        vmm_global_delete_vseg( process,
     107                                vseg_min );
     108    }
      109    else if( (vseg_min == addr_min) || (vseg_max == addr_max) ) 
     110    {
     111
     112#if( DEBUG_SYS_MUNMAP & 1 )
     113if( DEBUG_SYS_MUNMAP < cycle )
     114printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg resized\n",
     115__FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
     116#endif
     117        // resize existing vseg
     118        vmm_global_resize_vseg( process,
     119                                vseg_min,
     120                                addr_min,
     121                                addr_max - addr_min );
     122    }
     123    else     //  vseg_min < addr_min) && (addr_max < vseg_max)         
     124    {
     125
     126#if( DEBUG_SYS_MUNMAP & 1 )
     127if( DEBUG_SYS_MUNMAP < cycle )
     128printk("\n[%s] unmapped region[%x->%x[ / vseg[%x->%x[ => vseg resized & new vseg created\n",
     129__FUNCTION__, addr_min, addr_max, vseg_min, vseg_max );
     130#endif
     131        // resize existing vseg
     132        vmm_global_resize_vseg( process,
     133                                vseg_min,
     134                                vseg_min,
     135                                addr_min - vseg_min );
     136
     137        // create new vseg
     138        vmm_create_vseg( process,
     139                         vseg->type,
     140                         addr_max,
     141                         vseg_max - addr_max,
     142                         vseg->file_offset,
     143                         vseg->file_size,
     144                         vseg->mapper_xp,
     145                         vseg->cxy );
    88146    }
    89147
  • trunk/kernel/syscalls/syscalls.h

    r637 r640  
    187187/******************************************************************************************
    188188 * [11] This function remove an existing mapping defined by the <addr> and <size>
    189  * arguments in user space.
    190  ******************************************************************************************
     189 * arguments in user space. This can modify the number of vsegs:
     190 * (a) if the region is not entirely mapped in one existing vseg, it's an error.
     191 * (b) if the region has same base and size as an existing vseg, the vseg is removed.
      192 * (c) if the removed region cuts the existing vseg in two parts, it is resized.
      193 * (d) if the removed region cuts the vseg in three parts, it is resized, and a new
      194 *     vseg is created with the same type.
     195 * All existing VSL copies are updated.
     196******************************************************************************************
    191197 * @ addr  : base address in user space.
    192  * # size  : number of bytes.
     198 * @ size  : number of bytes.
    193199 * @ return 0 if success / return -1 if failure.
    194200 *****************************************************************************************/
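The four cases above map directly onto ordinary user code. The sketch below is illustrative only: it assumes a POSIX-like mmap()/munmap() interface in the user-level library, and the flag names are the standard POSIX ones, which may differ in this distribution.

// Illustrative user-level sketch (assumes standard POSIX mmap/munmap wrappers).
#include <sys/mman.h>
#include <unistd.h>

int example_munmap_cases( void )
{
    size_t page = (size_t)sysconf( _SC_PAGESIZE );

    // map an anonymous region of 4 pages : one vseg covering pages [0,4[ is created
    char * base = mmap( NULL , 4 * page , PROT_READ | PROT_WRITE ,
                        MAP_PRIVATE | MAP_ANONYMOUS , -1 , 0 );
    if( base == MAP_FAILED ) return -1;

    // case (c) : unmap the first page => the vseg is resized to pages [1,4[
    if( munmap( base , page ) ) return -1;

    // case (d) : unmap the middle page of [1,4[
    //            => the vseg is resized to [1,2[ and a new vseg [3,4[ is created
    if( munmap( base + 2 * page , page ) ) return -1;

    // case (b) : each remaining region matches a whole vseg => the vsegs are removed
    if( munmap( base + page , page ) ) return -1;
    if( munmap( base + 3 * page , page ) ) return -1;

    // case (a) : a region not entirely mapped in one vseg would be rejected (EINVAL)
    return 0;
}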