Timestamp: May 3, 2018, 5:51:22 PM
Author: alain
Message:

1/ Fix a bug in the multithreaded "sort" application:
the pthread_create() arguments must be declared as global variables
(see the first sketch below).
2/ The exit syscall can be called by any thread of a process
(see the second sketch below).
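A minimal user-level sketch of the bug class behind fix 1/ (hypothetical names, not code from this changeset): if the argument passed to pthread_create() lives on the creating thread's stack, it can be reused or overwritten before the new thread dereferences it, which is why the "sort" application must declare the arguments as global variables.

/* sketch : pthread_create() arguments must outlive the call
 * compile with : gcc sort_args.c -lpthread                   */
#include <pthread.h>
#include <stdio.h>

#define NTHREADS  4

/* hypothetical global array : one argument slot per thread,
 * so the storage stays valid whenever the new thread reads it */
int thread_index[NTHREADS];

void * worker( void * arg )
{
    printf("thread %d running\n", *(int *)arg);
    return NULL;
}

int main( void )
{
    pthread_t trdid[NTHREADS];
    int       i;

    for( i = 0 ; i < NTHREADS ; i++ )
    {
        thread_index[i] = i;   /* NOT a stack variable reused by the loop */
        pthread_create( &trdid[i] , NULL , worker , &thread_index[i] );
    }
    for( i = 0 ; i < NTHREADS ; i++ ) pthread_join( trdid[i] , NULL );
    return 0;
}

Passing &i instead would hand every thread a pointer to the same loop variable, which keeps changing and dies when main()'s frame unwinds.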
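For fix 2/, a user-level sketch of the POSIX behaviour the kernel must honour (again hypothetical code, not part of the changeset): exit() terminates the whole process whichever thread calls it, so the kernel path must delete all threads of the process, not only the calling one.

/* sketch : exit() called from a secondary thread kills the whole process */
#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>

void * worker( void * arg )
{
    printf("worker thread calls exit()\n");
    exit( 0 );          /* terminates every thread of the process */
    return NULL;        /* never reached */
}

int main( void )
{
    pthread_t trdid;
    pthread_create( &trdid , NULL , worker , NULL );
    pause();            /* never resumes : the process exits from worker */
    return 0;
}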

File: 1 edited

Legend:

  OLD NEW   unmodified line (old and new line numbers)
  OLD     - removed line (old line number only)
      NEW + added line (new line number only)
  • trunk/kernel/kern/rpc.c

r438 r440

114 114     client_core_lid = this->core->lid;
115 115
116     -   // select a server_core index:
117     -   // use client core index if possible / core 0 otherwise
    116 +   // select a server_core : use client core index if possible / core 0 otherwise
118 117     if( client_core_lid < hal_remote_lw( XPTR( server_cxy , &cluster->cores_nr ) ) )
119 118     {
     
133 132
134 133     // get local pointer on rpc_fifo in remote cluster,
135     -   remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    134 +   remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
136 135
137 136     // post RPC in remote fifo / deschedule and retry if fifo full
     
231 230     core_t        * core     = this->core;
232 231     scheduler_t   * sched    = &core->scheduler;
233     -   remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    232 +   remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo[core->lid];
234 233
235 234 #if DEBUG_RPC_SERVER_GENERIC
     
243 242     hal_disable_irq( &sr_save );
244 243
245     -   // activate (or create) RPC thread if RPC FIFO not empty
    244 +   // activate (or create) RPC thread if RPC FIFO not empty and no active RPC thread
246 245     if( (rpc_fifo->owner == 0) && (local_fifo_is_empty(rpc_fifo) == false) )
247 246     {
     
254 253 #endif
255 254
256     -   // search one IDLE RPC thread
    255 +   // search one IDLE RPC thread associated to the selected core
257 256     list_entry_t * iter;
258 257     LIST_FOREACH( &sched->k_root , iter )
     
270 269     }
271 270
272     -   // create new RPC thread if not found
    271 +   // create new RPC thread for the selected core if not found
273 272     if( found == false )
274 273     {
     
277 276                                     &rpc_thread_func,
278 277                                     NULL,
279     -                                   this->core->lid );
280     -       if( error )
281     -       {
282     -           assert( false , __FUNCTION__ ,
283     -           "no memory to allocate a new RPC thread in cluster %x", local_cxy );
284     -       }
    278 +                                   core->lid );
    279 +
    280 +       assert( (error == 0), __FUNCTION__ ,
    281 +       "no memory to allocate a new RPC thread in cluster %x", local_cxy );
285 282
286 283         // unblock created RPC thread
287 284         thread->blocked = 0;
288 285
289     -       // update core descriptor counter
290     -       hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , 1 );
    286 +       // update RPC threads counter
    287 +       hal_atomic_add( &LOCAL_CLUSTER->rpc_threads[core->lid] , 1 );
291 288
292 289 #if DEBUG_RPC_SERVER_GENERIC
     
325 322 void rpc_thread_func()
326 323 {
327     -   uint32_t     count;       // handled RPC requests counter
328     -   error_t      empty;       // local RPC fifo state
329     -   xptr_t       desc_xp;     // extended pointer on RPC request
330     -   cxy_t        desc_cxy;    // RPC request cluster (client)
331     -   rpc_desc_t * desc_ptr;    // RPC request local pointer
332     -   uint32_t     index;       // RPC request index
333     -   thread_t   * thread_ptr;  // local pointer on client thread
334     -   lid_t        core_lid;    // local index of client core
335     -   bool_t       blocking;    // blocking RPC when true
    324 +   error_t         empty;              // local RPC fifo state
    325 +   xptr_t          desc_xp;            // extended pointer on RPC request
    326 +   cxy_t           desc_cxy;           // RPC request cluster (client)
    327 +   rpc_desc_t    * desc_ptr;           // RPC request local pointer
    328 +   uint32_t        index;              // RPC request index
    329 +   thread_t      * client_ptr;         // local pointer on client thread
    330 +   thread_t      * server_ptr;         // local pointer on server thread
    331 +   xptr_t          server_xp;          // extended pointer on server thread
    332 +   lid_t           client_core_lid;    // local index of client core
    333 +   lid_t           server_core_lid;    // local index of server core
    334 +   bool_t          blocking;           // blocking RPC when true
    335 +   remote_fifo_t * rpc_fifo;           // local pointer on RPC fifo
336 336
337 337     // makes RPC thread not preemptable
338 338     hal_disable_irq( NULL );
339 339
340     -   thread_t      * this     = CURRENT_THREAD;
341     -   remote_fifo_t * rpc_fifo = &LOCAL_CLUSTER->rpc_fifo;
    340 +   server_ptr      = CURRENT_THREAD;
    341 +   server_xp       = XPTR( local_cxy , server_ptr );
    342 +   server_core_lid = server_ptr->core->lid;
    343 +   rpc_fifo        = &LOCAL_CLUSTER->rpc_fifo[server_core_lid];
342 344
343 345     // two embedded loops:
344 346     // - external loop : "infinite" RPC thread
345     -   // - internal loop : handle up to CONFIG_RPC_PENDING_MAX RPC requests
    347 +   // - internal loop : handle one RPC request per iteration
346 348
347 349     while(1)  // infinite loop
348 350     {
349 351         // try to take RPC_FIFO ownership
350     -       if( hal_atomic_test_set( &rpc_fifo->owner , this->trdid ) )
    352 +       if( hal_atomic_test_set( &rpc_fifo->owner , server_ptr->trdid ) )
351 353         {
352 354
     
355 357 if( DEBUG_RPC_SERVER_GENERIC < cycle )
356 358 printk("\n[DBG] %s : RPC thread %x in cluster %x takes RPC fifo ownership / cycle %d\n",
357     - __FUNCTION__, this, local_cxy, cycle );
358     - #endif
359     -           // initializes RPC requests counter
360     -           count = 0;
361     -
362     -           // exit internal loop in three cases:
363     -           // - RPC fifo is empty
364     -           // - ownership has been lost (because descheduling)
365     -           // - max number of RPCs is reached
366     -           while( 1 )  // internal loop
    359 + __FUNCTION__, server_ptr, local_cxy, cycle );
    360 + #endif
    361 +           while( 1 )  // one RPC request per iteration
367 362             {
368 363                 empty = local_fifo_get_item( rpc_fifo , (uint64_t *)&desc_xp );
369 364
370     -           if ( empty == 0 ) // one RPC request found
    365 +               // exit when FIFO empty or FIFO ownership lost (in case of descheduling)
    366 +               if ( (empty == 0) && (rpc_fifo->owner == server_ptr->trdid) )
371 367                 {
372 368                     // get client cluster and pointer on RPC descriptor
     
381 377 if( DEBUG_RPC_SERVER_GENERIC < cycle )
382 378 printk("\n[DBG] %s : RPC thread %x in cluster %x got rpc[%d] / rpc_cxy %x / rpc_ptr %x\n",
383     - __FUNCTION__, this, local_cxy, index, desc_cxy, desc_ptr );
    379 + __FUNCTION__, server_ptr, local_cxy, index, desc_cxy, desc_ptr );
384 380 #endif
385 381                     // call the relevant server function
     
390 386 if( DEBUG_RPC_SERVER_GENERIC < cycle )
391 387 printk("\n[DBG] %s : RPC thread %x in cluster %x completes rpc[%d] / rpc_ptr %x / cycle %d\n",
392     - __FUNCTION__, this, local_cxy, index, desc_ptr, cycle );
393     - #endif
394     -               // increment handled RPCs counter
395     -               count++;
396     -
    388 + __FUNCTION__, server_ptr, local_cxy, index, desc_ptr, cycle );
    389 + #endif
397 390                     // decrement response counter in RPC descriptor if blocking
398 391                     if( blocking )
     
402 395
403 396                     // get client thread pointer and client core lid from RPC descriptor
404     -               thread_ptr = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
405     -               core_lid  = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
    397 +                   client_ptr      = hal_remote_lpt( XPTR( desc_cxy , &desc_ptr->thread ) );
    398 +                   client_core_lid = hal_remote_lw ( XPTR( desc_cxy , &desc_ptr->lid ) );
406 399
407 400                     // unblock client thread
408     -               thread_unblock( XPTR( desc_cxy , thread_ptr ) , THREAD_BLOCKED_RPC );
    401 +                   thread_unblock( XPTR( desc_cxy , client_ptr ) , THREAD_BLOCKED_RPC );
409 402
410 403                     hal_fence();
     
414 407 if( DEBUG_RPC_SERVER_GENERIC < cycle )
415 408 printk("\n[DBG] %s : RPC thread %x (cluster %x) unblocked client thread %x (cluster %x)\n",
416     - __FUNCTION__, this, local_cxy, thread_ptr, desc_cxy, cycle );
    409 + __FUNCTION__, server_ptr, local_cxy, client_ptr, desc_cxy, cycle );
417 410 #endif
418 411                     // send IPI to client core
419     -                   dev_pic_send_ipi( desc_cxy , core_lid );
    412 +                   dev_pic_send_ipi( desc_cxy , client_core_lid );
420 413                 }
421 414             }
422     -
423     -           // chek exit condition
424     -           if( local_fifo_is_empty( rpc_fifo )  ||
425     -               (rpc_fifo->owner != this->trdid) ||
426     -               (count >= CONFIG_RPC_PENDING_MAX) ) break;
    415 +               else
    416 +               {
    417 +                   break;
    418 +               }
427 419             } // end internal loop
428 420
429 421         // release rpc_fifo ownership if not lost
430     -       if( rpc_fifo->owner == this->trdid ) rpc_fifo->owner = 0;
    422 +           if( rpc_fifo->owner == server_ptr->trdid ) rpc_fifo->owner = 0;
431 423
432 424     }  // end if RPC fifo
433 425
434     -   // sucide if too many RPC threads in cluster
435     -   if( LOCAL_CLUSTER->rpc_threads >= CONFIG_RPC_THREADS_MAX )
    426 +       // RPC thread blocks on IDLE
    427 +       thread_block( server_xp , THREAD_BLOCKED_IDLE );
    428 +
    429 +       // suicide if too many RPC threads / simply deschedule otherwise
    430 +       if( LOCAL_CLUSTER->rpc_threads[server_core_lid] >= CONFIG_RPC_THREADS_MAX )
436 431         {
437 432
     
440 435 if( DEBUG_RPC_SERVER_GENERIC < cycle )
441 436 printk("\n[DBG] %s : RPC thread %x in cluster %x suicides / cycle %d\n",
442     - __FUNCTION__, this, local_cxy, cycle );
    437 + __FUNCTION__, server_ptr, local_cxy, cycle );
443 438 #endif
444 439             // update RPC threads counter
445 440             hal_atomic_add( &LOCAL_CLUSTER->rpc_threads , -1 );
446 441
447     -           // suicide
448     -           thread_kill( XPTR( local_cxy , this ),
449     -                        true,                      // is_exit
450     -                        true );                    // is forced
    442 +           // RPC thread blocks on GLOBAL
    443 +           thread_block( server_xp , THREAD_BLOCKED_GLOBAL );
    444 +
    445 +           // RPC thread sets the REQ_DELETE flag to suicide
    446 +           hal_remote_atomic_or( server_xp , THREAD_FLAG_REQ_DELETE );
451 447         }
    448 +       else
    449 +       {
452 450
453 451 #if DEBUG_RPC_SERVER_GENERIC
454 452 uint32_t cycle = (uint32_t)hal_get_cycles();
455 453 if( DEBUG_RPC_SERVER_GENERIC < cycle )
456     - printk("\n[DBG] %s : RPC thread %x in cluster %x deschedules / cycle %d\n",
457     - __FUNCTION__, this, local_cxy, cycle );
458     - #endif
459     -
460     -   // Block and deschedule
461     -   thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_IDLE );
462     -   sched_yield("RPC fifo empty or too much work");
463     -
464     - #if DEBUG_RPC_SERVER_GENERIC
465     - cycle = (uint32_t)hal_get_cycles();
466     - if( DEBUG_RPC_SERVER_GENERIC < cycle )
467     - printk("\n[DBG] %s : RPC thread %x in cluster %x resumes / cycle %d\n",
468     - __FUNCTION__, this, local_cxy, cycle );
469     - #endif
    454 + printk("\n[DBG] %s : RPC thread %x in cluster %x blocks & deschedules / cycle %d\n",
    455 + __FUNCTION__, server_ptr, local_cxy, cycle );
    456 + #endif
    457 +
    458 +           // RPC thread deschedules
    459 +           assert( thread_can_yield( server_ptr ) , __FUNCTION__, "illegal sched_yield\n" );
    460 +           sched_yield("RPC fifo empty");
    461 +       }
470 462
471 463     } // end infinite loop
     
646 638
647 639     // set input arguments in RPC descriptor
648     -   rpc.args[0] = (uint64_t)(intptr_t)ref_process_xp;
649     -   rpc.args[1] = (uint64_t)(intptr_t)parent_thread_xp;
    640 +   rpc.args[0] = (uint64_t)ref_process_xp;
    641 +   rpc.args[1] = (uint64_t)parent_thread_xp;
650 642
651 643     // register RPC request in remote RPC fifo
     
903 895 void rpc_process_sigaction_server( xptr_t xp )
904 896 {
905     -   pid_t        pid;              // target process identifier
906     -   process_t  * process;          // pointer on local target process descriptor
907     -   uint32_t     action;           // sigaction index
908     -   thread_t   * client_thread;    // pointer on client thread in client cluster
909     -   cxy_t        client_cxy;       // client cluster identifier
910     -   rpc_desc_t * rpc;              // pointer on rpc descriptor in client cluster
911     -   xptr_t       count_xp;         // extended pointer on response counter
912     -   lid_t        client_lid;       // client core local index
    897 +   pid_t        pid;             // target process identifier
    898 +   process_t  * process;         // pointer on local target process descriptor
    899 +   uint32_t     action;          // sigaction index
    900 +   thread_t   * client_ptr;      // pointer on client thread in client cluster
    901 +   xptr_t       client_xp;       // extended pointer on client thread
    902 +   cxy_t        client_cxy;      // client cluster identifier
    903 +   rpc_desc_t * rpc;             // pointer on rpc descriptor in client cluster
    904 +   xptr_t       count_xp;        // extended pointer on responses counter
    905 +   uint32_t     count_value;     // responses counter value
    906 +   lid_t        client_lid;      // client core local index
913 907
914 908     // get client cluster identifier and pointer on RPC descriptor
     
927 921 #endif
928 922
    923 +   // get client thread pointers
    924 +   client_ptr = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
    925 +   client_xp  = XPTR( client_cxy , client_ptr );
    926 +
929 927     // get local process descriptor
930 928     process = cluster_get_local_process_from_pid( pid );
931 929
932 930     // call relevant kernel function
933     -   if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process );
934     -   else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process );
    931 +   if      ( action == DELETE_ALL_THREADS  ) process_delete_threads ( process , client_xp );
    932 +   else if ( action == BLOCK_ALL_THREADS   ) process_block_threads  ( process , client_xp );
935 933     else if ( action == UNBLOCK_ALL_THREADS ) process_unblock_threads( process );
     
939 937
940 938     // decrement the responses counter in RPC descriptor,
    939 +   count_value = hal_remote_atomic_add( count_xp , -1 );
    940 +
941 941     // unblock the client thread only if it is the last response.
942     -   if( hal_remote_atomic_add( count_xp , -1 ) == 1 )
    942 +   if( count_value == 1 )
943 943     {
944     -       // get client thread pointer and client core lid
945     -       client_thread = (thread_t *)hal_remote_lpt( XPTR( client_cxy , &rpc->thread ) );
    944 +       // get client core lid
946 945         client_lid    = (lid_t)     hal_remote_lw ( XPTR( client_cxy , &rpc->lid    ) );
947 946
948     -       thread_unblock( XPTR( client_cxy , client_thread ) , THREAD_BLOCKED_RPC );
    947 +       // unblock client thread
    948 +       thread_unblock( client_xp , THREAD_BLOCKED_RPC );
    949 +
    950 +       // send an IPI to client core
949 951         dev_pic_send_ipi( client_cxy , client_lid );
950 952     }
     
1192 1194                                 vfs_dentry_t * dentry )
1193 1195 {
     1196 + #if DEBUG_RPC_VFS_DENTRY_DESTROY
     1197 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1198 + if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1199 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1200 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1201 + #endif
     1202 +
1194 1203     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
1195 1204
     
1206 1215     rpc_send( cxy , &rpc );
1207 1216
     1217 + #if DEBUG_RPC_VFS_DENTRY_DESTROY
     1218 + cycle = (uint32_t)hal_get_cycles();
     1219 + if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1220 + printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1221 + __FUNCTION__ , CURRENT_THREAD , cycle );
     1222 + #endif
1208 1223 }
1209 1224
     
1211 1226 void rpc_vfs_dentry_destroy_server( xptr_t xp )
1212 1227 {
     1228 + #if DEBUG_RPC_VFS_DENTRY_DESTROY
     1229 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1230 + if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1231 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1232 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1233 + #endif
     1234 +
1213 1235     vfs_dentry_t * dentry;
1214 1236
     
1223 1245     vfs_dentry_destroy( dentry );
1224 1246
     1247 + #if DEBUG_RPC_VFS_DENTRY_DESTROY
     1248 + cycle = (uint32_t)hal_get_cycles();
     1249 + if( cycle > DEBUG_RPC_VFS_DENTRY_DESTROY )
     1250 + printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1251 + __FUNCTION__ , CURRENT_THREAD , cycle );
     1252 + #endif
1225 1253 }
1226 1254
     
1319 1347                                 vfs_file_t * file )
1320 1348 {
     1349 + #if DEBUG_RPC_VFS_FILE_DESTROY
     1350 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1351 + if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1352 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1353 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1354 + #endif
     1355 +
1321 1356     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
1322 1357
     
1333 1368     rpc_send( cxy , &rpc );
1334 1369
     1370 + #if DEBUG_RPC_VFS_FILE_DESTROY
     1371 + cycle = (uint32_t)hal_get_cycles();
     1372 + if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1373 + printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1374 + __FUNCTION__ , CURRENT_THREAD , cycle );
     1375 + #endif
1335 1376 }
1336 1377
     
1338 1379 void rpc_vfs_file_destroy_server( xptr_t xp )
1339 1380 {
     1381 + #if DEBUG_RPC_VFS_FILE_DESTROY
     1382 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1383 + if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1384 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1385 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1386 + #endif
     1387 +
1340 1388     vfs_file_t * file;
1341 1389
     
1350 1398     vfs_file_destroy( file );
1351 1399
     1400 + #if DEBUG_RPC_VFS_FILE_DESTROY
     1401 + cycle = (uint32_t)hal_get_cycles();
     1402 + if( cycle > DEBUG_RPC_VFS_FILE_DESTROY )
     1403 + printk("\n[DBG] %s : thread %x exit / cycle %d\n",
     1404 + __FUNCTION__ , CURRENT_THREAD , cycle );
     1405 + #endif
1352 1406 }
1353 1407
     
1536 1590                             error_t   * error )      // out
1537 1591 {
     1592 + #if DEBUG_RPC_VMM_GET_VSEG
     1593 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1594 + if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1595 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1596 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1597 + #endif
     1598 +
1538 1599     assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
1539 1600
     
1555 1616     *error   = (error_t)rpc.args[3];
1556 1617
     1618 + #if DEBUG_RPC_VMM_GET_VSEG
     1619 + cycle = (uint32_t)hal_get_cycles();
     1620 + if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1621 + printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1622 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1623 + #endif
1557 1624 }
1558 1625
     
1560 1627 void rpc_vmm_get_vseg_server( xptr_t xp )
1561 1628 {
     1629 + #if DEBUG_RPC_VMM_GET_VSEG
     1630 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1631 + if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1632 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1633 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1634 + #endif
     1635 +
1562 1636     process_t   * process;
1563 1637     intptr_t      vaddr;
     
1582 1656     hal_remote_swd( XPTR( client_cxy , &desc->args[3] ) , (uint64_t)error );
1583 1657
1584     - }
1585     -
1586     -
1587     - /////////////////////////////////////////////////////////////////////////////////////////
1588     - // [21]          Marshaling functions attached to RPC_VMM_GET_PTE  (blocking)
     1658 + #if DEBUG_RPC_VMM_GET_VSEG
     1659 + cycle = (uint32_t)hal_get_cycles();
     1660 + if( cycle > DEBUG_RPC_VMM_GET_VSEG )
     1661 + printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1662 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1663 + #endif
     1664 + }
     1665 +
     1666 +
     1667 + /////////////////////////////////////////////////////////////////////////////////////////
     1668 + // [21]          Marshaling functions attached to RPC_VMM_GET_VSEG  (blocking)
1589 1669 /////////////////////////////////////////////////////////////////////////////////////////
1590 1670
     
1598 1678                             error_t   * error )   // out
1599 1679 {
1600     -   assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
1601     -
1602     -   // initialise RPC descriptor header
1603     -   rpc_desc_t  rpc;
1604     -   rpc.index    = RPC_VMM_GET_PTE;
     1680 + #if DEBUG_RPC_VMM_GET_PTE
     1681 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1682 + if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1683 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1684 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1685 + #endif
     1686 +
     1687 +   assert( (cxy != local_cxy) , __FUNCTION__ , "target cluster is not remote\n");
     1688 +
     1689 +   // initialise RPC descriptor header
     1690 +   rpc_desc_t  rpc;
     1691 +   rpc.index    = RPC_VMM_GET_VSEG;
1605 1692     rpc.blocking = true;
1606 1693     rpc.responses = 1;
     
1619 1706     *error = (error_t)rpc.args[5];
1620 1707
     1708 + #if DEBUG_RPC_VMM_GET_PTE
     1709 + cycle = (uint32_t)hal_get_cycles();
     1710 + if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1711 + printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1712 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1713 + #endif
1621 1714 }
1622 1715
     
1624 1717 void rpc_vmm_get_pte_server( xptr_t xp )
1625 1718 {
     1719 + #if DEBUG_RPC_VMM_GET_PTE
     1720 + uint32_t cycle = (uint32_t)hal_get_cycles();
     1721 + if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1722 + printk("\n[DBG] %s : thread %x enter on core[%x,%d] / cycle %d\n",
     1723 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1724 + #endif
     1725 +
1626 1726     process_t   * process;
1627 1727     vpn_t         vpn;
     
1648 1748     hal_remote_swd( XPTR( client_cxy , &desc->args[5] ) , (uint64_t)error );
1649 1749
     1750 + #if DEBUG_RPC_VMM_GET_PTE
     1751 + cycle = (uint32_t)hal_get_cycles();
     1752 + if( cycle > DEBUG_RPC_VMM_GET_PTE )
     1753 + printk("\n[DBG] %s : thread %x exit on core[%x,%d] / cycle %d\n",
     1754 + __FUNCTION__ , CURRENT_THREAD , local_cxy, CURRENT_THREAD->core->lid , cycle );
     1755 + #endif
1650 1756 }
1651 1757