Changeset 409 for trunk/kernel/libk


Ignore:
Timestamp:
Dec 20, 2017, 4:51:09 PM (7 years ago)
Author:
alain
Message:

Fix bugs in exec

Location:
trunk/kernel/libk
Files:
8 edited

Legend:

Unmodified
Added
Removed
  • trunk/kernel/libk/remote_rwlock.c

    r337 r409  
    4040    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->current ) , 0 );
    4141    hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count )   , 0 );
     42
     43#if CONFIG_LOCKS_DEBUG
    4244    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner )   , XPTR_NULL );
     45    xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) );
     46#endif
     47
    4348}
    4449
     
    5358    cxy_t             lock_cxy = GET_CXY( lock_xp );
    5459
    55     // get cluster and local pointer on local thread
     60    // get local pointer on local thread
    5661    thread_t          * thread_ptr = CURRENT_THREAD;
    5762
     
    8186    thread_ptr->remote_locks++;
    8287
     88#if CONFIG_LOCKS_DEBUG
     89    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     90                     XPTR( lock_cxy ,  &lock_ptr->list ) );
     91#endif
     92
    8393    // sync
    8494    hal_fence();
     
    115125    // decrement thread.remote_locks
    116126        thread_ptr->remote_locks--;
     127
     128#if CONFIG_LOCKS_DEBUG
     129    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     130#endif
    117131
    118132    // enable interrupts
     
    134148    cxy_t             lock_cxy = GET_CXY( lock_xp );
    135149
    136     // get cluster and local pointer on local thread
    137     cxy_t               thread_cxy = local_cxy;
     150    // get local pointer on local thread
    138151    thread_t          * thread_ptr = CURRENT_THREAD;
    139152
     
    142155    xptr_t              count_xp   = XPTR( lock_cxy   , &lock_ptr->count );
    143156    xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    144     xptr_t              owner_xp   = XPTR( lock_cxy   , &lock_ptr->owner );
    145     xptr_t              thread_xp  = XPTR( thread_cxy , thread_ptr );
    146157
    147158    // disable interrupts
     
    165176    }
    166177
    167     // register owner thread
    168     hal_remote_swd( owner_xp , thread_xp );
     178#if CONFIG_LOCKS_DEBUG
     179    hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     180                    XPTR( local_cxy , thread_ptr ) );
     181    xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     182                     XPTR( lock_cxy  , &lock_ptr->list ) );
     183#endif   
    169184
    170185    // increment thread.remote_locks
     
    188203    thread_t          * thread_ptr = CURRENT_THREAD;
    189204
    190     // compute extended pointers on lock->ticket, lock->owner
     205    // compute extended pointer on lock->ticket
    191206    xptr_t              current_xp = XPTR( lock_cxy   , &lock_ptr->current );
    192     xptr_t              owner_xp   = XPTR( lock_cxy   , &lock_ptr->owner );
    193207
    194208    // disable interrupts
    195209        hal_disable_irq( &mode );
    196210 
    197     // unregister owner thread, and release lock
    198     hal_remote_swd( owner_xp , XPTR_NULL );
     211#if CONFIG_LOCKS_DEBUG
     212    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     213    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     214#endif
     215
     216    // release lock
    199217    hal_remote_atomic_add( current_xp , 1 );
    200218
     
    217235    uint32_t     current;               // ticket index of current owner
    218236    uint32_t     count;                 // current number of reader threads
    219     xptr_t       owner;                 // extended pointer on writer thread
    220237
    221238    // get cluster and local pointer on remote_rwlock
     
    226243    current = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->current ) );
    227244    count   = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->count ) );
    228     owner   = hal_remote_lwd( XPTR( lock_cxy , &lock_ptr->owner ) );
    229 
    230     printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d / owner = %l\n",
    231            lock_xp , comment , ticket , current , count , owner );
     245
     246    printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d\n",
     247           lock_xp , comment , ticket , current , count );
    232248
    233249}  // end remote_rwlock_print()
  • trunk/kernel/libk/remote_rwlock.h

    r50 r409  
    4040 *   accesses before starting its own access.
    4141 * When the lock is taken by another thread, the new-comers use a busy waiting policy.
    42  *
    43  * It uses a busy-waiting policy if the lock is already allocated to another thread.
    4442 **************************************************************************************/
    4543
    4644typedef struct remote_rwlock_s
    4745{
    48     uint32_t     ticket;                /*! first free ticket index                   */
    49     uint32_t     current;               /*! ticket index of current owner             */
    50     uint32_t     count;                 /*! current number of reader threads          */
    51     xptr_t       owner;                 /*! extended pointer on writer thread         */
     46    uint32_t       ticket;          /*! first free ticket index                       */
     47    uint32_t       current;         /*! ticket index of current owner                 */
     48    uint32_t       count;           /*! current number of reader threads              */
     49
     50#if CONFIG_LOCKS_DEBUG
     51    xptr_t         owner;           /*! extended pointer on writer thread             */
     52    xlist_entry_t  list;            /*! member of list of remote locks taken by owner */
     53#endif
     54
    5255}
    5356remote_rwlock_t;
  • trunk/kernel/libk/remote_spinlock.c

    r408 r409  
    3838
    3939        hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
     40
     41#if CONFIG_LOCKS_DEBUG
    4042        hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    4143        xlist_entry_init( XPTR( cxy , &ptr->list ) );
     44#endif
     45
    4246}
    4347
     
    5256        cxy_t               lock_cxy = GET_CXY( lock_xp );
    5357
    54         // get cluster and local pointer on local thread
    55         cxy_t               thread_cxy = local_cxy;
     58        // get local pointer on local thread
    5659        thread_t          * thread_ptr = CURRENT_THREAD;
    5760
     
    7376                thread_ptr->remote_locks++;
    7477
     78#if CONFIG_LOCKS_DEBUG
    7579                hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    76                             (uint64_t)XPTR( thread_cxy , thread_ptr) );
    77 
    78                 xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
    79                              XPTR( lock_cxy , &lock_ptr->list ) );
     80                                XPTR( thread_cxy , thread_ptr) );
     81                xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     82                                 XPTR( lock_cxy , &lock_ptr->list ) );
     83#endif
    8084
    8185                hal_restore_irq(mode);
     
    96100        cxy_t               lock_cxy = GET_CXY( lock_xp );
    97101
    98         // get cluster and local pointer on local thread
    99         cxy_t               thread_cxy = local_cxy;
     102        // get local pointer on local thread
    100103        thread_t          * thread_ptr = CURRENT_THREAD;
    101104
     
    118121        thread_ptr->remote_locks++;
    119122
    120         hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    121                         (uint64_t)XPTR( thread_cxy , thread_ptr) );
    122 
    123         xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
    124                          XPTR( lock_cxy , &lock_ptr->list ) );
     123#if CONFIG_LOCKS_DEBUG
     124        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     125                        XPTR( local_cxy , thread_ptr) );
     126        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     127                         XPTR( lock_cxy  , &lock_ptr->list ) );
     128#endif
    125129
    126130        // irq_state must be restored when lock is released
     
    140144        thread_t          * thread_ptr = CURRENT_THREAD;
    141145
     146#if CONFIG_LOCKS_DEBUG
    142147        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     148        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     149#endif
     150
    143151        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    144152        thread_ptr->remote_locks--;
    145         xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    146153
    147154    // deschedule if pending request
     
    163170        cxy_t               lock_cxy = GET_CXY( lock_xp );
    164171
    165     // get cluster and local pointer on calling thread
    166     cxy_t               thread_cxy = local_cxy;
     172    // get local pointer on calling thread
    167173    thread_t          * thread_ptr = CURRENT_THREAD;
    168174
     
    191197        thread_ptr->remote_locks++;
    192198
    193         hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
    194                         (uint64_t)XPTR( thread_cxy , thread_ptr) );
    195 
    196         xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
    197                          XPTR( lock_cxy , &lock_ptr->list ) );
     199#if CONFIG_LOCKS_DEBUG
     200        hal_remote_swd( XPTR( lock_cxy  , &lock_ptr->owner ) ,
     201                        XPTR( local_cxy , thread_ptr) );
     202        xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) ,
     203                         XPTR( lock_cxy  , &lock_ptr->list ) );
     204#endif
    198205
    199206        // enable interrupts
     
    211218        thread_t          * thread_ptr = CURRENT_THREAD;
    212219
     220#if CONFIG_LOCKS_DEBUG
    213221        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
     222        xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
     223#endif
     224
    214225        hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 );
    215226        thread_ptr->remote_locks--;
    216         xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
    217227
    218228    // deschedule if pending request
     
    220230}
    221231
    222 //////////////////////////////////////////////
    223 xptr_t remote_spinlock_owner( xptr_t lock_xp )
    224 {
    225     // get cluster and local pointer on remote_spinlock
    226     remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    227     cxy_t               lock_cxy = GET_CXY( lock_xp );
    228 
    229     return hal_remote_lw( XPTR( lock_cxy , &lock_ptr->owner ) );
    230 }
  • trunk/kernel/libk/remote_spinlock.h

    r101 r409  
    3333 * This structure defines a remote spinlock, that can be used to protect
    3434 * exclusive access to a trans-cluster shared resource. It can be taken by any
    35  * thread running in any cluster. All access functions use remote pointers,
    36  * and the owner thread is registrated as a remote pointer.
     35 * thread running in any cluster. All access functions use remote pointers.
     36 * The "owner" and "list" are optional fields used for debug.
     37 * It registers the list of all remote spinlocks taken by a given thread.
    3738 **************************************************************************************/
    3839
     
    4041{
    4142    volatile uint32_t     taken;       /*! free if 0 / taken if non zero             */
     43
     44#if CONFIG_LOCKS_DEBUG
    4245    xptr_t                owner;       /*! extended pointer on the owner thread      */
    4346    xlist_entry_t         list;        /*! list of all remote_lock taken by owner    */
     47#endif
     48
    4449}
    4550remote_spinlock_t;
     
    100105void remote_spinlock_unlock( xptr_t  lock_xp );
    101106
    102 /***************************************************************************************
    103  * This debug function returns the current owner of a remote spinlock.
    104  ***************************************************************************************
    105  * @ lock_xp    : extended pointer on the remote spinlock
    106  * @ return XPTR_NULL if not taken / return owner thread if lock already taken
    107  **************************************************************************************/
    108 xptr_t remote_spinlock_owner( xptr_t  lock_xp );
    109 
    110 
    111107#endif
  • trunk/kernel/libk/rwlock.c

    r337 r409  
    3737    lock->current = 0;
    3838    lock->count   = 0;
     39
     40#if CONFIG_LOCKS_DEBUG
    3941        lock->owner   = NULL;
     42    list_entry_init( &lock->list );
     43#endif
     44
    4045}
    4146
     
    6570    this->local_locks++;
    6671
     72#if CONFIG_LOCKS_DEBUG
     73    list_add_first( &this->locks_root , &lock->list );
     74#endif
     75
    6776    // consistency
    6877    hal_fence();
     
    8897    hal_atomic_add( &lock->count , -1 );
    8998    this->local_locks--;
     99
     100#if CONFIG_LOCKS_DEBUG
     101    list_unlink( &lock->list );
     102#endif
    90103
    91104    // enable IRQs
     
    123136    }
    124137
     138    this->local_locks++;
     139
     140#if CONFIG_LOCKS_DEBUG
    125141    lock->owner = this;
    126     this->local_locks++;
     142    list_add_first( &this->locks_root , &lock->list );
     143#endif
    127144
    128145    // enable IRQs
     
    140157        hal_disable_irq( &mode );
    141158 
     159#if CONFIG_LOCKS_DEBUG
     160    lock->owner = NULL;
     161    list_unlink( &lock->list );
     162#endif
     163
    142164    // release lock
    143165    lock->current++;
    144     lock->owner = NULL;
    145166    this->local_locks--;
    146167
  • trunk/kernel/libk/rwlock.h

    r14 r409  
    4040 * As this local lock is only accessed by the local threads, if the lock is taken,
    4141 * the new-comers use a busy waiting policy with a delay between retry.
     42 * TODO : Introduce the rwlocks in the list of locks taken by a given thread for debug.
    4243 ******************************************************************************************/
    4344
     
    4849/*******************************************************************************************
    4950 * This structure defines a local rwlock.
     51 * The "owner" and "list" fields are used for debug.
    5052 ******************************************************************************************/
    5153
     
    5557    uint32_t            current;          /*! ticket index of current owner               */
    5658    uint32_t            count;            /*! number of simultaneous readers threads      */
     59
     60#if CONFIG_LOCKS_DEBUG
    5761        struct thread_s   * owner;            /*! pointer on current writer thread            */
     62    list_entry_t        list;             /*! member of list of locks taken by owner      */
     63#endif
     64
    5865}
    5966rwlock_t;
  • trunk/kernel/libk/spinlock.c

    r408 r409  
    3737{
    3838    lock->taken = 0;
     39
     40#if CONFIG_LOCKS_DEBUG
    3941    lock->owner = NULL;
    4042    list_entry_init( &lock->list );
     43#endif
     44
    4145}
    4246
     
    6670
    6771    this->local_locks++;
     72
     73#if CONFIG_LOCKS_DEBUG
    6874    lock->owner = this;
    6975    list_add_first( &this->locks_root , &lock->list );
     76#endif
    7077
    7178    // irq_state must be restored when lock is released
     
    7986    thread_t * this = CURRENT_THREAD;;
    8087
     88#if CONFIG_LOCKS_DEBUG
    8189    lock->owner = NULL;
     90    list_unlink( &lock->list );
     91#endif
     92
    8293    lock->taken = 0;
    8394    this->local_locks--;
    84     list_unlink( &lock->list );
    8595
    8696    // deschedule if pending request
     
    121131
    122132    this->local_locks++;
     133
     134#if CONFIG_LOCKS_DEBUG
    123135    lock->owner = this;
    124136    list_add_first( &this->locks_root , &lock->list );
     137#endif
    125138
    126139    // restore IRQs
     
    148161    {
    149162        this->local_locks++;
     163
     164#if CONFIG_LOCKS_DEBUG
    150165        lock->owner = this;
    151166        list_add_first( &this->locks_root , &lock->list );
     167#endif
     168
    152169        hal_restore_irq(mode);
    153170        return 0;
     
    160177    thread_t * this = CURRENT_THREAD;
    161178
     179#if CONFIG_LOCKS_DEBUG
    162180    lock->owner = NULL;
     181    list_unlink( &lock->list );
     182#endif
     183
    163184    lock->taken = 0;
    164185    this->local_locks--;
    165     list_unlink( &lock->list );
    166186
    167187    // deschedule if pending request
  • trunk/kernel/libk/spinlock.h

    r14 r409  
    5555/*******************************************************************************************
    5656 * This structure defines a local spinlock.
      57 * The "owner" and "list" are optional fields used for debug.
      58 * It registers the list of all spinlocks taken by a given thread.
    5759 ******************************************************************************************/
    5860
     
    6062{
    6163        uint32_t            taken;             /*! state : free if zero / taken if non zero  */
     64
     65#if CONFIG_LOCKS_DEBUG
    6266        struct thread_s   * owner;             /*! pointer on current owner thread           */
    63     list_entry_t        list;              /*! list of all locks taken by owner          */
     67    list_entry_t        list;              /*! member of list of locks taken by owner    */
     68#endif
     69
    6470}
    6571spinlock_t;
     
    96102/*******************************************************************************************
    97103 * This blocking function locks a local spinlock.
    98  * If the lock is already taken, the calling thread deschedules and retries when
    99  * it is rescheduled, until success.
     104 * If the lock is already taken, the calling thread deschedules without blocking,
     105 * and retries when it is rescheduled, until success.
    100106 * It increments the calling thread local_locks count when the lock has been taken.
    101107 *******************************************************************************************
Note: See TracChangeset for help on using the changeset viewer.