Timestamp:
Oct 4, 2018, 11:47:36 PM
Author:
alain
Message:

Complete restructuring of kernel locks.

File:
1 edited

  • trunk/kernel/kern/thread.c
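
This changeset replaces the old remote_spinlock_lock()/remote_spinlock_unlock() calls and the per-thread local_locks/remote_locks counters with typed busylocks (remote_busylock_init/acquire/release, each tagged with a LOCK_* type) and a single per-thread busylocks counter, checked by the new thread_assert_can_yield() before any descheduling. The block below is a minimal standalone sketch of that idea, not the ALMOS-MKH code itself: the names (my_busylock_t, my_thread_t, busylock_acquire, ...) are illustrative assumptions, and the real kernel operates on remote extended pointers (xptr_t) rather than plain pointers.

    /*
     * Simplified model (assumed names, not ALMOS-MKH code): each busy-waiting
     * lock carries a type tag, and each thread counts the busylocks it holds
     * so that a yield can be refused while any busylock is still held.
     */
    #include <assert.h>
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { LOCK_THREAD_JOIN = 0, LOCK_PAGE_FORKS = 1 };

    static const char *lock_type_str[] = { "THREAD_JOIN", "PAGE_FORKS" };

    typedef struct my_busylock_s
    {
        atomic_flag taken;      /* spin flag                      */
        uint32_t    type;       /* lock type, used for debug dump */
    } my_busylock_t;

    typedef struct my_thread_s
    {
        uint32_t busylocks;     /* number of busylocks currently held */
    } my_thread_t;

    static void busylock_init( my_busylock_t *lock, uint32_t type )
    {
        atomic_flag_clear( &lock->taken );
        lock->type = type;
    }

    static void busylock_acquire( my_thread_t *this, my_busylock_t *lock )
    {
        while( atomic_flag_test_and_set( &lock->taken ) ) { /* spin */ }
        this->busylocks++;              /* thread now holds one more busylock */
    }

    static void busylock_release( my_thread_t *this, my_busylock_t *lock )
    {
        this->busylocks--;              /* decrement before clearing the flag */
        atomic_flag_clear( &lock->taken );
    }

    /* refuse to deschedule a thread that still holds a busylock */
    static void thread_assert_can_yield( my_thread_t *this, const char *func )
    {
        if( this->busylocks )
        {
            printf( "[PANIC] in %s : cannot yield with %u busylock(s)\n",
                    func, (unsigned)this->busylocks );
            assert( 0 );
        }
    }

    int main( void )
    {
        my_thread_t   thread = { .busylocks = 0 };
        my_busylock_t join_lock;

        busylock_init( &join_lock, LOCK_THREAD_JOIN );

        busylock_acquire( &thread, &join_lock );
        printf( "holding %s lock\n", lock_type_str[join_lock.type] );
        busylock_release( &thread, &join_lock );

        thread_assert_can_yield( &thread, __func__ );   /* OK: no lock held */
        return 0;
    }

Keeping the held-lock count in the thread descriptor is what allows the scheduler, through thread_assert_can_yield(), to detect a thread that tries to deschedule while still holding a busy-waiting lock, which is the invariant the diff below enforces throughout thread.c.
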

    r531 r564  
    11/*
    2  * thread.c -  implementation of thread operations (user & kernel)
     2 * thread.c -   thread operations implementation (user & kernel)
    33 *
    44 * Author  Ghassan Almaless (2008,2009,2010,2011,2012)
    5  *         Alain Greiner (2016,2017)
     5 *         Alain Greiner (2016,2017,2018)
    66 *
    77 * Copyright (c) UPMC Sorbonne Universites
     
    4848//////////////////////////////////////////////////////////////////////////////////////
    4949
    50 extern process_t      process_zero;
     50extern process_t            process_zero;       // allocated in kernel_init.c
     51extern char               * lock_type_str[];    // allocated in kernel_init.c
     52extern chdev_directory_t    chdev_dir;          // allocated in kernel_init.c
    5153
    5254//////////////////////////////////////////////////////////////////////////////////////
     
    145147        cluster_t    * local_cluster = LOCAL_CLUSTER;
    146148
    147 #if DEBUG_THREAD_USER_INIT
     149#if DEBUG_THREAD_INIT
    148150uint32_t cycle = (uint32_t)hal_get_cycles();
    149 if( DEBUG_THREAD_USER_INIT < cycle )
    150 printk("\n[DBG] %s : thread %x enter to init thread %x in process %x / cycle %d\n",
    151 __FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
    152 #endif
    153 
    154     // register new thread in process descriptor, and get a TRDID
    155     thread->type = type; // needed by process_register_thread.
    156     error = process_register_thread( process, thread , &trdid );
    157 
    158     if( error )
    159     {
    160         printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
    161         return EINVAL;
    162     }
     151if( DEBUG_THREAD_INIT < cycle )
     152printk("\n[DBG] %s : thread %x in process %x enter for thread %x in process %x / cycle %d\n",
     153__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     154 thread, process->pid , cycle );
     155#endif
    163156
    164157    // compute thread descriptor size without kernel stack
     
    166159
    167160        // Initialize new thread descriptor
    168     thread->trdid           = trdid;
     161        thread->type            = type;
    169162    thread->quantum         = 0;            // TODO
    170163    thread->ticks_nr        = 0;            // TODO
     
    173166        thread->process         = process;
    174167
    175     thread->local_locks     = 0;
    176     thread->remote_locks    = 0;
    177 
    178 #if CONFIG_LOCKS_DEBUG
    179     list_root_init( &thread->locks_root ); 
    180     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
     168    thread->busylocks       = 0;
     169
     170#if DEBUG_BUSYLOCK
     171    xlist_root_init( XPTR( local_cxy , &thread->busylocks_root ) );
    181172#endif
    182173
     
    194185    thread->blocked         = THREAD_BLOCKED_GLOBAL;
    195186
    196     // reset sched list
     187    // register new thread in process descriptor, and get a TRDID
     188    error = process_register_thread( process, thread , &trdid );
     189
     190    if( error )
     191    {
     192        printk("\n[ERROR] in %s : cannot get TRDID\n", __FUNCTION__ );
     193        return EINVAL;
     194    }
     195
     196    // initialize trdid
     197    thread->trdid           = trdid;
     198
     199    // initialize sched list
    197200    list_entry_init( &thread->sched_list );
    198201
    199     // reset thread info
     202    // initialize waiting queue entries
     203    list_entry_init( &thread->wait_list );
     204    xlist_entry_init( XPTR( local_cxy , &thread->wait_xlist ) );
     205
     206    // initialize thread info
    200207    memset( &thread->info , 0 , sizeof(thread_info_t) );
    201208
    202     // initializes join_lock
    203     remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );
     209    // initialize join_lock
     210    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
    204211
    205212    // initialise signature
     
    216223    dqdt_update_threads( 1 );
    217224
    218 #if DEBUG_THREAD_USER_INIT
     225#if DEBUG_THREAD_INIT
    219226cycle = (uint32_t)hal_get_cycles();
    220 if( DEBUG_THREAD_USER_INIT < cycle )
    221 printk("\n[DBG] %s : thread %x exit  after init of thread %x in process %x / cycle %d\n",
    222 __FUNCTION__, CURRENT_THREAD, thread, process->pid , cycle );
     227if( DEBUG_THREAD_INIT < cycle )
     228printk("\n[DBG] %s : thread %x in process %x exit for thread %x in process %x / cycle %d\n",
     229__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     230thread, process->pid , cycle );
    223231#endif
    224232
     
    436444    args  = (void *)  hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->entry_args    ));
    437445    base  = (intptr_t)hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->u_stack_base  ));
    438     size  = (uint32_t)hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
    439     flags =           hal_remote_lw ( XPTR( parent_cxy , &parent_ptr->flags         ));
     446    size  = (uint32_t)hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->u_stack_size  ));
     447    flags =           hal_remote_l32 ( XPTR( parent_cxy , &parent_ptr->flags         ));
    440448    uzone = (reg_t *) hal_remote_lpt( XPTR( parent_cxy , &parent_ptr->uzone_current ));
    441449
     
    474482    }
    475483
     484#if (DEBUG_THREAD_USER_FORK & 1)
     485if( DEBUG_THREAD_USER_FORK < cycle )
     486printk("\n[DBG] %s : thread %x in process %x / initialised thread %x in process %x\n",
     487__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
     488child_ptr->trdid, child_process->pid );
     489#endif
     490
    476491    // return child pointer
    477492    *child_thread = child_ptr;
     
    502517    }
    503518
    504     // create and initialize STACK vseg
     519#if (DEBUG_THREAD_USER_FORK & 1)
     520if( DEBUG_THREAD_USER_FORK < cycle )
     521printk("\n[DBG] %s : thread %x in process %x / created CPU & FPU contexts\n",
     522__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     523#endif
     524
     525   // create and initialize STACK vseg
    505526    vseg = vseg_alloc();
    506527    vseg_init( vseg,
     
    514535
    515536    // register STACK vseg in local child VSL
    516     vseg_attach( &child_process->vmm , vseg );
     537    vmm_vseg_attach( &child_process->vmm , vseg );
     538
     539#if (DEBUG_THREAD_USER_FORK & 1)
     540if( DEBUG_THREAD_USER_FORK < cycle )
     541printk("\n[DBG] %s : thread %x in process %x / created stack vseg\n",
     542__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid );
     543#endif
    517544
    518545    // copy all valid STACK GPT entries   
     
    530557        if( error )
    531558        {
    532             vseg_detach( vseg );
     559            vmm_vseg_detach( &child_process->vmm , vseg );
    533560            vseg_free( vseg );
    534561            thread_release( child_ptr );
     
    549576            xptr_t lock_xp  = XPTR( page_cxy , &page_ptr->lock );
    550577
    551             // increment the forks counter
    552             remote_spinlock_lock( lock_xp ); 
     578            // get lock protecting page
     579            remote_busylock_acquire( lock_xp ); 
     580
     581            // increment the forks counter in page descriptor
    553582            hal_remote_atomic_add( forks_xp , 1 );
    554             remote_spinlock_unlock( lock_xp ); 
     583
     584            // release lock protecting page
     585            remote_busylock_release( lock_xp ); 
    555586
    556587#if (DEBUG_THREAD_USER_FORK & 1)
     
    559590printk("\n[DBG] %s : thread %x in process %x copied one PTE to child GPT : vpn %x / forks %d\n",
    560591__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid, vpn,
    561 hal_remote_lw( XPTR( page_cxy , &page_ptr->forks) ) );
     592hal_remote_l32( XPTR( page_cxy , &page_ptr->forks) ) );
    562593#endif
    563594
     
    596627#endif
    597628
    598         assert( (thread->type == THREAD_USER )          , "bad type" );
    599         assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
    600         assert( (thread->local_locks == 0)              , "bad local locks" );
    601         assert( (thread->remote_locks == 0)             , "bad remote locks" );
     629// check parent thread attributes
     630assert( (thread->type == THREAD_USER )          , "bad type" );
     631assert( (thread->signature == THREAD_SIGNATURE) , "bad signature" );
     632assert( (thread->busylocks == 0)                , "bad busylocks" );
    602633
    603634        // re-initialize various thread descriptor fields
     
    605636    thread->ticks_nr        = 0;            // TODO
    606637    thread->time_last_check = 0;            // TODO
    607 
    608 #if CONFIG_LOCKS_DEBUG
    609     list_root_init( &thread->locks_root ); 
    610     xlist_root_init( XPTR( local_cxy , &thread->xlocks_root ) );
    611 #endif
    612638
    613639    thread->entry_func      = entry_func;
     
    622648    thread->fork_cxy        = 0;    // not inherited
    623649
     650    // re-initialize busylocks counters
     651    thread->busylocks       = 0;
     652
    624653    // reset thread info
    625654    memset( &thread->info , 0 , sizeof(thread_info_t) );
    626655
    627     // initialize join_lock
    628     remote_spinlock_init( XPTR( local_cxy , &thread->join_lock ) );
     656    // re-initialize join_lock
     657    remote_busylock_init( XPTR( local_cxy , &thread->join_lock ), LOCK_THREAD_JOIN );
    629658
    630659    // allocate an user stack vseg for main thread
     
    664693        hal_cpu_context_exec( thread );
    665694
    666     assert( false, "we should execute this code");
     695    assert( false, "we should not execute this code");
    667696 
    668697    return 0;
     
    742771                           lid_t           core_lid )
    743772{
    744     assert( (type == THREAD_IDLE) , "illegal thread type" );
    745     assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
     773
     774// check arguments
     775assert( (type == THREAD_IDLE) , "illegal thread type" );
     776assert( (core_lid < LOCAL_CLUSTER->cores_nr) , "illegal core index" );
    746777
    747778    // initialize thread descriptor
     
    784815#endif
    785816
    786     assert( (thread->local_locks == 0) ,
    787     "local lock not released for thread %x in process %x", thread->trdid, process->pid );
    788 
    789     assert( (thread->remote_locks == 0) ,
    790     "remote lock not released for thread %x in process %x", thread->trdid, process->pid );
     817// check busylocks counter
     818assert( (thread->busylocks == 0) ,
     819"busylock not released for thread %x in process %x", thread->trdid, process->pid );
    791820
    792821    // update intrumentation values
     
    890919}  // thread_reset_req_ack()
    891920
    892 ////////////////////////////////
    893 inline bool_t thread_can_yield( void )
    894 {
    895     thread_t * this = CURRENT_THREAD;
    896     return (this->local_locks == 0) && (this->remote_locks == 0);
    897 }
    898 
    899 /////////////////////////
    900 void thread_check_sched( void )
    901 {
    902     thread_t * this = CURRENT_THREAD;
    903 
    904         if( (this->local_locks == 0) &&
    905         (this->remote_locks == 0) &&
    906         (this->flags & THREAD_FLAG_SCHED) )
    907     {
    908         this->flags &= ~THREAD_FLAG_SCHED;
    909         sched_yield( "delayed scheduling" );
    910     }
    911 
    912 }  // end thread_check_sched()
    913 
    914921//////////////////////////////////////
    915922void thread_block( xptr_t   thread_xp,
     
    930937printk("\n[DBG] %s : thread %x in process %x blocked thread %x in process %x / cause %x\n",
    931938__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    932 ptr->trdid, hal_remote_lw(XPTR( cxy , &process->pid )), cause );
     939ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
    933940#endif
    934941
     
    953960printk("\n[DBG] %s : thread %x in process %x unblocked thread %x in process %x / cause %x\n",
    954961__FUNCTION__, CURRENT_THREAD->trdid, CURRENT_THREAD->process->pid,
    955 ptr->trdid, hal_remote_lw(XPTR( cxy , &process->pid )), cause );
     962ptr->trdid, hal_remote_l32(XPTR( cxy , &process->pid )), cause );
    956963#endif
    957964
     
    974981    thread_t  * target_ptr;             // pointer on target thread
    975982    xptr_t      target_flags_xp;        // extended pointer on target thread <flags>
    976     uint32_t    target_flags;           // target thread <flags> value
    977983    xptr_t      target_join_lock_xp;    // extended pointer on target thread <join_lock>
    978984    xptr_t      target_join_xp_xp;      // extended pointer on target thread <join_xp>
     
    982988    thread_t  * joining_ptr;            // pointer on joining thread
    983989    cxy_t       joining_cxy;            // joining thread cluster
    984     cxy_t       owner_cxy;              // process owner cluster
    985 
    986 
    987     // get target thread pointers, identifiers, and flags
     990
     991    // get target thread cluster and local pointer
    988992    target_cxy      = GET_CXY( target_xp );
    989993    target_ptr      = GET_PTR( target_xp );
    990     target_trdid    = hal_remote_lw( XPTR( target_cxy , &target_ptr->trdid ) );
     994
     995    // get target thread identifiers, and attached flag
     996    target_trdid    = hal_remote_l32( XPTR( target_cxy , &target_ptr->trdid ) );
    991997    target_ltid     = LTID_FROM_TRDID( target_trdid );
    992998    target_flags_xp = XPTR( target_cxy , &target_ptr->flags );
    993     target_flags    = hal_remote_lw( target_flags_xp );
     999    target_attached = ( (hal_remote_l32( target_flags_xp ) & THREAD_FLAG_DETACHED) == 0 );
    9941000
    9951001    // get killer thread pointers
     
    9981004
    9991005#if DEBUG_THREAD_DELETE
    1000 uint32_t cycle  = (uint32_t)hal_get_cycles;
     1006uint32_t cycle  = (uint32_t)hal_get_cycles();
    10011007if( DEBUG_THREAD_DELETE < cycle )
    1002 printk("\n[DBG] %s : killer thread %x enter for target thread %x / cycle %d\n",
    1003 __FUNCTION__, killer_ptr, target_ptr, cycle );
    1004 #endif
    1005 
    1006     // target thread cannot be the main thread, because the main thread
    1007     // must be deleted by the parent process sys_wait() function
    1008     owner_cxy = CXY_FROM_PID( pid );
    1009     assert( ((owner_cxy != target_cxy) || (target_ltid != 0)),
    1010     "tharget thread cannot be the main thread\n" );
     1008printk("\n[DBG] %s : thread %x in process %x enters / target thread %x / cycle %d\n",
     1009__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
     1010#endif
     1011
     1012// check killer thread can yield
     1013assert( (killer_ptr->busylocks == 0),
     1014"cannot yield : busylocks = %d\n", killer_ptr->busylocks );
     1015
     1016// check target thread is not the main thread, because the main thread
     1017// must be deleted by the parent process sys_wait() function
     1018assert( ((CXY_FROM_PID( pid ) != target_cxy) || (target_ltid != 0)),
     1019"tharget thread cannot be the main thread\n" );
    10111020
    10121021    // block the target thread
    10131022    thread_block( target_xp , THREAD_BLOCKED_GLOBAL );
    10141023
    1015     // get attached from target flag descriptor
    1016     target_attached = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_DETACHED) != 0);
    1017 
    1018     // synchronize with the joining thread if the target thread is attached
    1019     if( target_attached && (is_forced == false) )
    1020     {
     1024    // synchronize with the joining thread if attached
     1025    if( target_attached && (is_forced == false) )
     1026    {
     1027
     1028#if (DEBUG_THREAD_DELETE & 1)
     1029if( DEBUG_THREAD_DELETE < cycle )
     1030printk("\n[DBG] %s : thread %x in process %x / target thread is attached\n",
     1031__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1032#endif
    10211033        // build extended pointers on target thread join fields
    10221034        target_join_lock_xp  = XPTR( target_cxy , &target_ptr->join_lock );
     
    10271039
    10281040        // take the join_lock in target thread descriptor
    1029         remote_spinlock_lock( target_join_lock_xp );
     1041        remote_busylock_acquire( target_join_lock_xp );
    10301042
    10311043        // get join_done from target thread descriptor
    1032         target_join_done = ((hal_remote_lw( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
     1044        target_join_done = ((hal_remote_l32( target_flags_xp ) & THREAD_FLAG_JOIN_DONE) != 0);
    10331045   
    10341046        if( target_join_done )  // joining thread arrived first => unblock the joining thread
    10351047        {
     1048
     1049#if (DEBUG_THREAD_DELETE & 1)
     1050if( DEBUG_THREAD_DELETE < cycle )
     1051printk("\n[DBG] %s : thread %x in process %x / joining thread arrived first\n",
     1052__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1053#endif
    10361054            // get extended pointer on joining thread
    1037             joining_xp  = (xptr_t)hal_remote_lwd( target_join_xp_xp );
     1055            joining_xp  = (xptr_t)hal_remote_l64( target_join_xp_xp );
    10381056            joining_ptr = GET_PTR( joining_xp );
    10391057            joining_cxy = GET_CXY( joining_xp );
     
    10461064
    10471065            // release the join_lock in target thread descriptor
    1048             remote_spinlock_unlock( target_join_lock_xp );
     1066            remote_busylock_release( target_join_lock_xp );
     1067
     1068            // set the REQ_DELETE flag in target thread descriptor
     1069            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    10491070
    10501071            // restore IRQs
    10511072            hal_restore_irq( save_sr );
    10521073        }
    1053         else                // this thread arrived first => register flags and deschedule
     1074        else                // killer thread arrived first => register flags and deschedule
    10541075        {
     1076
     1077#if (DEBUG_THREAD_DELETE & 1)
     1078if( DEBUG_THREAD_DELETE < cycle )
     1079printk("\n[DBG] %s : thread %x in process %x / killer thread arrived first\n",
     1080__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1081#endif
    10551082            // set the kill_done flag in target thread
    10561083            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_KILL_DONE );
     
    10601087
    10611088            // set extended pointer on killer thread in target thread
    1062             hal_remote_swd( target_join_xp_xp , killer_xp );
     1089            hal_remote_s64( target_join_xp_xp , killer_xp );
    10631090
    10641091            // release the join_lock in target thread descriptor
    1065             remote_spinlock_unlock( target_join_lock_xp );
    1066 
     1092            remote_busylock_release( target_join_lock_xp );
     1093
     1094#if (DEBUG_THREAD_DELETE & 1)
     1095if( DEBUG_THREAD_DELETE < cycle )
     1096printk("\n[DBG] %s : thread %x in process %x / killer thread deschedule\n",
     1097__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1098#endif
    10671099            // deschedule
    10681100            sched_yield( "killer thread wait joining thread" );
     1101
     1102#if (DEBUG_THREAD_DELETE & 1)
     1103if( DEBUG_THREAD_DELETE < cycle )
     1104printk("\n[DBG] %s : thread %x in process %x / killer thread resume\n",
     1105__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid );
     1106#endif
     1107            // set the REQ_DELETE flag in target thread descriptor
     1108            hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
    10691109
    10701110            // restore IRQs
    10711111            hal_restore_irq( save_sr );
    10721112        }
    1073     }  // end if attached
    1074 
    1075     // set the REQ_DELETE flag in target thread descriptor
    1076     hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1113    }
     1114    else                                                   // target thread not attached
     1115    {
     1116        // set the REQ_DELETE flag in target thread descriptor
     1117        hal_remote_atomic_or( target_flags_xp , THREAD_FLAG_REQ_DELETE );
     1118    }
    10771119
    10781120#if DEBUG_THREAD_DELETE
    10791121cycle  = (uint32_t)hal_get_cycles;
    10801122if( DEBUG_THREAD_DELETE < cycle )
    1081 printk("\n[DBG] %s : killer thread %x exit for target thread %x / cycle %d\n",
    1082 __FUNCTION__, killer_ptr, target_ptr, cycle );
     1123printk("\n[DBG] %s : thread %x in process %x exit / target thread %x / cycle %d\n",
     1124__FUNCTION__, killer_ptr->trdid, killer_ptr->process->pid, target_ptr->trdid, cycle );
    10831125#endif
    10841126
     
    10871129
    10881130
    1089 ///////////////////////
     1131/////////////////////////////
    10901132void thread_idle_func( void )
    10911133{
    1092 
    1093 #if DEBUG_THREAD_IDLE
    1094 uint32_t cycle;
    1095 #endif
    1096 
    10971134    while( 1 )
    10981135    {
     
    11041141        {
    11051142
    1106 #if (DEBUG_THREAD_IDLE & 1)
    1107 cycle  = (uint32_t)hal_get_cycles;
     1143#if DEBUG_THREAD_IDLE
     1144{
     1145uint32_t cycle = (uint32_t)hal_get_cycles();
    11081146if( DEBUG_THREAD_IDLE < cycle )
    11091147printk("\n[DBG] %s : idle thread on core[%x,%d] goes to sleep / cycle %d\n",
    11101148__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
     1149}
    11111150#endif
    11121151
    11131152            hal_core_sleep();
    11141153
    1115 #if (DEBUG_THREAD_IDLE & 1)
    1116 cycle  = (uint32_t)hal_get_cycles;
     1154#if DEBUG_THREAD_IDLE
     1155{
     1156uint32_t cycle = (uint32_t)hal_get_cycles();
    11171157if( DEBUG_THREAD_IDLE < cycle )
    11181158printk("\n[DBG] %s : idle thread on core[%x,%d] wake up / cycle %d\n",
    11191159__FUNCTION__, local_cxy, CURRENT_THREAD->core->lid, cycle );
     1160}
    11201161#endif
    11211162
     
    11231164
    11241165#if DEBUG_THREAD_IDLE
     1166{
     1167uint32_t cycle = (uint32_t)hal_get_cycles();
     1168if( DEBUG_THREAD_IDLE < cycle )
    11251169sched_display( CURRENT_THREAD->core->lid );
     1170}
    11261171#endif     
    1127 
    11281172        // search a runable thread
    1129         sched_yield( "IDLE" );
    1130     }
     1173        sched_yield( "running idle thread" );
     1174
     1175    } // end while
     1176
    11311177}  // end thread_idle()
    11321178
     
    11341180///////////////////////////////////////////
    11351181void thread_time_update( thread_t * thread,
    1136                          uint32_t   is_user )
     1182                         bool_t     is_user )
    11371183{
    11381184    cycle_t current_cycle;   // current cycle counter value
     
    11541200    if( is_user ) info->usr_cycles += (current_cycle - last_cycle);
    11551201    else          info->sys_cycles += (current_cycle - last_cycle);
    1156 }
     1202
     1203}  // end thread_time_update()
    11571204
    11581205/////////////////////////////////////
     
    11741221
    11751222    // check trdid argument
    1176         if( (target_thread_ltid >= CONFIG_THREAD_MAX_PER_CLUSTER) ||
     1223        if( (target_thread_ltid >= CONFIG_THREADS_MAX_PER_CLUSTER) ||
    11771224        cluster_is_undefined( target_cxy ) )         return XPTR_NULL;
    11781225
     
    11821229                       sizeof(xlist_entry_t) );
    11831230
    1184     // get extended pointer on lock protecting the list of processes
     1231    // get extended pointer on lock protecting the list of local processes
    11851232    lock_xp = XPTR( target_cxy , &LOCAL_CLUSTER->pmgr.local_lock );
    11861233
    11871234    // take the lock protecting the list of processes in target cluster
    1188     remote_spinlock_lock( lock_xp );
    1189 
    1190     // loop on list of process in target cluster to find the PID process
     1235    remote_queuelock_acquire( lock_xp );
     1236
     1237    // scan the list of local processes in target cluster
    11911238    xptr_t  iter;
    11921239    bool_t  found = false;
     
    11951242        target_process_xp  = XLIST_ELEMENT( iter , process_t , local_list );
    11961243        target_process_ptr = GET_PTR( target_process_xp );
    1197         target_process_pid = hal_remote_lw( XPTR( target_cxy , &target_process_ptr->pid ) );
     1244        target_process_pid = hal_remote_l32( XPTR( target_cxy , &target_process_ptr->pid ) );
    11981245        if( target_process_pid == pid )
    11991246        {
     
    12041251
    12051252    // release the lock protecting the list of processes in target cluster
    1206     remote_spinlock_unlock( lock_xp );
     1253    remote_queuelock_release( lock_xp );
    12071254
    12081255    // check PID found
     
    12161263
    12171264    return XPTR( target_cxy , target_thread_ptr );
     1265
     1266}  // end thread_get_xptr()
     1267
     1268///////////////////////////////////////////////////
     1269void thread_assert_can_yield( thread_t    * thread,
     1270                              const char  * func_str )
     1271{
     1272    // does nothing if thread does not hold any busylock
     1273
     1274    if( thread->busylocks )
     1275    {
     1276        // get pointers on TXT0 chdev
     1277        xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1278        cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1279        chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1280
     1281        // get extended pointer on TXT0 lock
     1282        xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1283
     1284        // get TXT0 lock
     1285        remote_busylock_acquire( txt0_lock_xp );
     1286
     1287        // display error message on TXT0
     1288        nolock_printk("\n[PANIC] in %s / thread %x in process %x [%x] cannot yield : "
     1289        "%d busylock(s) / cycle %d\n",
     1290        func_str, thread->trdid, thread->process->pid, thread,
     1291        thread->busylocks, (uint32_t)hal_get_cycles() );
     1292
     1293#if DEBUG_BUSYLOCK
     1294if( XPTR( local_cxy , thread ) == DEBUG_BUSYLOCK_THREAD_XP )
     1295{
     1296    // get root of list of taken busylocks
     1297    xptr_t    root_xp  = XPTR( local_cxy , &thread->busylocks_root );
     1298    xptr_t    iter_xp;
     1299
     1300    // scan list of busylocks
     1301    XLIST_FOREACH( root_xp , iter_xp )
     1302    {
     1303        xptr_t       lock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
     1304        cxy_t        lock_cxy  = GET_CXY( lock_xp );
     1305        busylock_t * lock_ptr  = GET_PTR( lock_xp );
     1306        uint32_t     lock_type = hal_remote_l32( XPTR( lock_cxy , &lock_ptr->type ) );
     1307        nolock_printk(" - %s in cluster %x\n", lock_type_str[lock_type] , lock_cxy );
     1308    }
    12181309}
    1219 
     1310#endif
     1311
     1312        // release TXT0 lock
     1313        remote_busylock_release( txt0_lock_xp );
     1314
     1315        // suicide
     1316        hal_core_sleep();
     1317    }
     1318}  // end thread_assert_can yield()
     1319
     1320#if DEBUG_BUSYLOCK
     1321
     1322////////////////////////////////////////////////////
     1323void thread_display_busylocks( uint32_t   lock_type,
     1324                               bool_t     is_acquire )
     1325{
     1326    xptr_t    iter_xp;
     1327
     1328    // get cluster and local pointer of target thread
     1329    cxy_t      thread_cxy = GET_CXY( DEBUG_BUSYLOCK_THREAD_XP );
     1330    thread_t * thread_ptr = GET_PTR( DEBUG_BUSYLOCK_THREAD_XP );
     1331
     1332    // get extended pointer on root of busylocks
     1333    xptr_t    root_xp = XPTR( thread_cxy , &thread_ptr->busylocks_root );
     1334
     1335   // get pointers on TXT0 chdev
     1336    xptr_t    txt0_xp  = chdev_dir.txt_tx[0];
     1337    cxy_t     txt0_cxy = GET_CXY( txt0_xp );
     1338    chdev_t * txt0_ptr = GET_PTR( txt0_xp );
     1339
     1340    // get extended pointer on remote TXT0 lock
     1341    xptr_t  txt0_lock_xp = XPTR( txt0_cxy , &txt0_ptr->wait_lock );
     1342
     1343    // get TXT0 lock
     1344    remote_busylock_acquire( txt0_lock_xp );
     1345
     1346    if( is_acquire )
     1347    {
     1348        nolock_printk("\n### thread [%x,%x] ACQUIRE lock %s / root %x / locks :\n",
     1349        thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) );
     1350    }
     1351    else
     1352    {
     1353        nolock_printk("\n### thread [%x,%x] RELEASE lock %s / root %x / locks :\n",
     1354        thread_cxy, thread_ptr, lock_type_str[lock_type], GET_PTR(root_xp) );
     1355    }
     1356
     1357    int i;
     1358
     1359    XLIST_FOREACH( root_xp , iter_xp )
     1360    {
     1361        xptr_t       ilock_xp   = XLIST_ELEMENT( iter_xp , busylock_t , xlist );
     1362        cxy_t        ilock_cxy  = GET_CXY( ilock_xp );
     1363        busylock_t * ilock_ptr  = GET_PTR( ilock_xp );
     1364        uint32_t     ilock_type = hal_remote_l32( XPTR( ilock_cxy , &ilock_ptr->type ) );
     1365        nolock_printk(" - %s in cluster %x\n", lock_type_str[ilock_type] , ilock_cxy );
     1366    }
     1367
     1368    // release TXT0 lock
     1369    remote_busylock_release( txt0_lock_xp );
     1370}
     1371#endif