Changeset 409 for trunk/kernel/libk
- Timestamp:
- Dec 20, 2017, 4:51:09 PM (7 years ago)
- Location:
- trunk/kernel/libk
- Files:
-
- 8 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/libk/remote_rwlock.c
r337 r409 40 40 hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->current ) , 0 ); 41 41 hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->count ) , 0 ); 42 43 #if CONFIG_LOCKS_DEBUG 42 44 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 45 xlist_entry_init( XPTR( lock_cxy , &lock_ptr->list ) ); 46 #endif 47 43 48 } 44 49 … … 53 58 cxy_t lock_cxy = GET_CXY( lock_xp ); 54 59 55 // get cluster andlocal pointer on local thread60 // get local pointer on local thread 56 61 thread_t * thread_ptr = CURRENT_THREAD; 57 62 … … 81 86 thread_ptr->remote_locks++; 82 87 88 #if CONFIG_LOCKS_DEBUG 89 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 90 XPTR( lock_cxy , &lock_ptr->list ) ); 91 #endif 92 83 93 // sync 84 94 hal_fence(); … … 115 125 // decrement thread.remote_locks 116 126 thread_ptr->remote_locks--; 127 128 #if CONFIG_LOCKS_DEBUG 129 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); 130 #endif 117 131 118 132 // enable interrupts … … 134 148 cxy_t lock_cxy = GET_CXY( lock_xp ); 135 149 136 // get cluster and local pointer on local thread 137 cxy_t thread_cxy = local_cxy; 150 // get local pointer on local thread 138 151 thread_t * thread_ptr = CURRENT_THREAD; 139 152 … … 142 155 xptr_t count_xp = XPTR( lock_cxy , &lock_ptr->count ); 143 156 xptr_t current_xp = XPTR( lock_cxy , &lock_ptr->current ); 144 xptr_t owner_xp = XPTR( lock_cxy , &lock_ptr->owner );145 xptr_t thread_xp = XPTR( thread_cxy , thread_ptr );146 157 147 158 // disable interrupts … … 165 176 } 166 177 167 // register owner thread 168 hal_remote_swd( owner_xp , thread_xp ); 178 #if CONFIG_LOCKS_DEBUG 179 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 180 XPTR( local_cxy , thread_ptr ) ); 181 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 182 XPTR( lock_cxy , &lock_ptr->list ) ); 183 #endif 169 184 170 185 // increment thread.remote_locks … … 188 203 thread_t * thread_ptr = CURRENT_THREAD; 189 204 190 // compute extended pointer s on lock->ticket, 
lock->owner205 // compute extended pointer on lock->ticket 191 206 xptr_t current_xp = XPTR( lock_cxy , &lock_ptr->current ); 192 xptr_t owner_xp = XPTR( lock_cxy , &lock_ptr->owner );193 207 194 208 // disable interrupts 195 209 hal_disable_irq( &mode ); 196 210 197 // unregister owner thread, and release lock 198 hal_remote_swd( owner_xp , XPTR_NULL ); 211 #if CONFIG_LOCKS_DEBUG 212 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 213 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); 214 #endif 215 216 // release lock 199 217 hal_remote_atomic_add( current_xp , 1 ); 200 218 … … 217 235 uint32_t current; // ticket index of current owner 218 236 uint32_t count; // current number of reader threads 219 xptr_t owner; // extended pointer on writer thread220 237 221 238 // get cluster and local pointer on remote_rwlock … … 226 243 current = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->current ) ); 227 244 count = hal_remote_lw ( XPTR( lock_cxy , &lock_ptr->count ) ); 228 owner = hal_remote_lwd( XPTR( lock_cxy , &lock_ptr->owner ) ); 229 230 printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d / owner = %l\n", 231 lock_xp , comment , ticket , current , count , owner ); 245 246 printk("\n*** rwlock <%l> %s : ticket = %d / current = %d / count = %d\n", 247 lock_xp , comment , ticket , current , count ); 232 248 233 249 } // end remote_rwlock_print() -
trunk/kernel/libk/remote_rwlock.h
r50 r409 40 40 * accesses before starting its own access. 41 41 * When the lock is taken by another thread, the new-comers use a busy waiting policy. 42 *43 * It uses a busy-waiting policy if the lock is already allocated to another thread.44 42 **************************************************************************************/ 45 43 46 44 typedef struct remote_rwlock_s 47 45 { 48 uint32_t ticket; /*! first free ticket index */ 49 uint32_t current; /*! ticket index of current owner */ 50 uint32_t count; /*! current number of reader threads */ 51 xptr_t owner; /*! extended pointer on writer thread */ 46 uint32_t ticket; /*! first free ticket index */ 47 uint32_t current; /*! ticket index of current owner */ 48 uint32_t count; /*! current number of reader threads */ 49 50 #if CONFIG_LOCKS_DEBUG 51 xptr_t owner; /*! extended pointer on writer thread */ 52 xlist_entry_t list; /*! member of list of remote locks taken by owner */ 53 #endif 54 52 55 } 53 56 remote_rwlock_t; -
trunk/kernel/libk/remote_spinlock.c
r408 r409 38 38 39 39 hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 ); 40 41 #if CONFIG_LOCKS_DEBUG 40 42 hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL ); 41 43 xlist_entry_init( XPTR( cxy , &ptr->list ) ); 44 #endif 45 42 46 } 43 47 … … 52 56 cxy_t lock_cxy = GET_CXY( lock_xp ); 53 57 54 // get cluster and local pointer on local thread 55 cxy_t thread_cxy = local_cxy; 58 // get local pointer on local thread 56 59 thread_t * thread_ptr = CURRENT_THREAD; 57 60 … … 73 76 thread_ptr->remote_locks++; 74 77 78 #if CONFIG_LOCKS_DEBUG 75 79 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 76 (uint64_t)XPTR( thread_cxy , thread_ptr) );77 78 xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,79 XPTR( lock_cxy , &lock_ptr->list ) ); 80 XPTR( thread_cxy , thread_ptr) ); 81 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 82 XPTR( lock_cxy , &lock_ptr->list ) ); 83 #endif 80 84 81 85 hal_restore_irq(mode); … … 96 100 cxy_t lock_cxy = GET_CXY( lock_xp ); 97 101 98 // get cluster and local pointer on local thread 99 cxy_t thread_cxy = local_cxy; 102 // get local pointer on local thread 100 103 thread_t * thread_ptr = CURRENT_THREAD; 101 104 … … 118 121 thread_ptr->remote_locks++; 119 122 120 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 121 (uint64_t)XPTR( thread_cxy , thread_ptr) ); 122 123 xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) , 124 XPTR( lock_cxy , &lock_ptr->list ) ); 123 #if CONFIG_LOCKS_DEBUG 124 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 125 XPTR( local_cxy , thread_ptr) ); 126 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 127 XPTR( lock_cxy , &lock_ptr->list ) ); 128 #endif 125 129 126 130 // irq_state must be restored when lock is released … … 140 144 thread_t * thread_ptr = CURRENT_THREAD; 141 145 146 #if CONFIG_LOCKS_DEBUG 142 147 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 148 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); 149 #endif 150 
143 151 hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 ); 144 152 thread_ptr->remote_locks--; 145 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );146 153 147 154 // deschedule if pending request … … 163 170 cxy_t lock_cxy = GET_CXY( lock_xp ); 164 171 165 // get cluster and local pointer on calling thread 166 cxy_t thread_cxy = local_cxy; 172 // get local pointer on calling thread 167 173 thread_t * thread_ptr = CURRENT_THREAD; 168 174 … … 191 197 thread_ptr->remote_locks++; 192 198 193 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 194 (uint64_t)XPTR( thread_cxy , thread_ptr) ); 195 196 xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) , 197 XPTR( lock_cxy , &lock_ptr->list ) ); 199 #if CONFIG_LOCKS_DEBUG 200 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , 201 XPTR( local_cxy , thread_ptr) ); 202 xlist_add_first( XPTR( local_cxy , &thread_ptr->xlocks_root ) , 203 XPTR( lock_cxy , &lock_ptr->list ) ); 204 #endif 198 205 199 206 // enable interrupts … … 211 218 thread_t * thread_ptr = CURRENT_THREAD; 212 219 220 #if CONFIG_LOCKS_DEBUG 213 221 hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL ); 222 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) ); 223 #endif 224 214 225 hal_remote_sw ( XPTR( lock_cxy , &lock_ptr->taken ) , 0 ); 215 226 thread_ptr->remote_locks--; 216 xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );217 227 218 228 // deschedule if pending request … … 220 230 } 221 231 222 //////////////////////////////////////////////223 xptr_t remote_spinlock_owner( xptr_t lock_xp )224 {225 // get cluster and local pointer on remote_spinlock226 remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );227 cxy_t lock_cxy = GET_CXY( lock_xp );228 229 return hal_remote_lw( XPTR( lock_cxy , &lock_ptr->owner ) );230 } -
trunk/kernel/libk/remote_spinlock.h
r101 r409 33 33 * This structure defines a remote spinlock, that can be used to protect 34 34 * exclusive access to a trans-cluster shared resource. It can be taken by any 35 * thread running in any cluster. All access functions use remote pointers, 36 * and the owner thread is registrated as a remote pointer. 35 * thread running in any cluster. All access functions use remote pointers. 36 * The "owner" and "list" are optionnal fields used for debug. 37 * It register the list of all remote spinlocks taken by a given thread. 37 38 **************************************************************************************/ 38 39 … … 40 41 { 41 42 volatile uint32_t taken; /*! free if 0 / taken if non zero */ 43 44 #if CONFIG_LOCKS_DEBUG 42 45 xptr_t owner; /*! extended pointer on the owner thread */ 43 46 xlist_entry_t list; /*! list of all remote_lock taken by owner */ 47 #endif 48 44 49 } 45 50 remote_spinlock_t; … … 100 105 void remote_spinlock_unlock( xptr_t lock_xp ); 101 106 102 /***************************************************************************************103 * This debug function returns the current owner of a remote spinlock.104 ***************************************************************************************105 * @ lock_xp : extended pointer on the remote spinlock106 * @ return XPTR_NULL if not taken / return owner thread if lock already taken107 **************************************************************************************/108 xptr_t remote_spinlock_owner( xptr_t lock_xp );109 110 111 107 #endif -
trunk/kernel/libk/rwlock.c
r337 r409 37 37 lock->current = 0; 38 38 lock->count = 0; 39 40 #if CONFIG_LOCKS_DEBUG 39 41 lock->owner = NULL; 42 list_entry_init( &lock->list ); 43 #endif 44 40 45 } 41 46 … … 65 70 this->local_locks++; 66 71 72 #if CONFIG_LOCKS_DEBUG 73 list_add_first( &this->locks_root , &lock->list ); 74 #endif 75 67 76 // consistency 68 77 hal_fence(); … … 88 97 hal_atomic_add( &lock->count , -1 ); 89 98 this->local_locks--; 99 100 #if CONFIG_LOCKS_DEBUG 101 list_unlink( &lock->list ); 102 #endif 90 103 91 104 // enable IRQs … … 123 136 } 124 137 138 this->local_locks++; 139 140 #if CONFIG_LOCKS_DEBUG 125 141 lock->owner = this; 126 this->local_locks++; 142 list_add_first( &this->locks_root , &lock->list ); 143 #endif 127 144 128 145 // enable IRQs … … 140 157 hal_disable_irq( &mode ); 141 158 159 #if CONFIG_LOCKS_DEBUG 160 lock->owner = NULL; 161 list_unlink( &lock->list ); 162 #endif 163 142 164 // release lock 143 165 lock->current++; 144 lock->owner = NULL;145 166 this->local_locks--; 146 167 -
trunk/kernel/libk/rwlock.h
r14 r409 40 40 * As this local lock is only accessed by the local threads, if the lock is taken, 41 41 * the new-comers use a busy waiting policy with a delay between retry. 42 * TODO : Introduce the rwlocks in the list of locks taken by a given thread for debug. 42 43 ******************************************************************************************/ 43 44 … … 48 49 /******************************************************************************************* 49 50 * This structure defines a local rwlock. 51 * The "owner" and "list" fields are used for debug. 50 52 ******************************************************************************************/ 51 53 … … 55 57 uint32_t current; /*! ticket index of current owner */ 56 58 uint32_t count; /*! number of simultaneous readers threads */ 59 60 #if CONFIG_LOCKS_DEBUG 57 61 struct thread_s * owner; /*! pointer on curent writer thread */ 62 list_entry_t list; /*! member of list of locks taken by owner */ 63 #endif 64 58 65 } 59 66 rwlock_t; -
trunk/kernel/libk/spinlock.c
r408 r409 37 37 { 38 38 lock->taken = 0; 39 40 #if CONFIG_LOCKS_DEBUG 39 41 lock->owner = NULL; 40 42 list_entry_init( &lock->list ); 43 #endif 44 41 45 } 42 46 … … 66 70 67 71 this->local_locks++; 72 73 #if CONFIG_LOCKS_DEBUG 68 74 lock->owner = this; 69 75 list_add_first( &this->locks_root , &lock->list ); 76 #endif 70 77 71 78 // irq_state must be restored when lock is released … … 79 86 thread_t * this = CURRENT_THREAD;; 80 87 88 #if CONFIG_LOCKS_DEBUG 81 89 lock->owner = NULL; 90 list_unlink( &lock->list ); 91 #endif 92 82 93 lock->taken = 0; 83 94 this->local_locks--; 84 list_unlink( &lock->list );85 95 86 96 // deschedule if pending request … … 121 131 122 132 this->local_locks++; 133 134 #if CONFIG_LOCKS_DEBUG 123 135 lock->owner = this; 124 136 list_add_first( &this->locks_root , &lock->list ); 137 #endif 125 138 126 139 // restore IRQs … … 148 161 { 149 162 this->local_locks++; 163 164 #if CONFIG_LOCKS_DEBUG 150 165 lock->owner = this; 151 166 list_add_first( &this->locks_root , &lock->list ); 167 #endif 168 152 169 hal_restore_irq(mode); 153 170 return 0; … … 160 177 thread_t * this = CURRENT_THREAD; 161 178 179 #if CONFIG_LOCKS_DEBUG 162 180 lock->owner = NULL; 181 list_unlink( &lock->list ); 182 #endif 183 163 184 lock->taken = 0; 164 185 this->local_locks--; 165 list_unlink( &lock->list );166 186 167 187 // deschedule if pending request -
trunk/kernel/libk/spinlock.h
r14 r409 55 55 /******************************************************************************************* 56 56 * This structure defines a local spinlock. 57 * The "owner" and "list" are optionnal fields used for debug. 58 * It register the list of all spinlocks taken by a given thread. 57 59 ******************************************************************************************/ 58 60 … … 60 62 { 61 63 uint32_t taken; /*! state : free if zero / taken if non zero */ 64 65 #if CONFIG_LOCKS_DEBUG 62 66 struct thread_s * owner; /*! pointer on curent owner thread */ 63 list_entry_t list; /*! list of all locks taken by owner */ 67 list_entry_t list; /*! member of list of locks taken by owner */ 68 #endif 69 64 70 } 65 71 spinlock_t; … … 96 102 /******************************************************************************************* 97 103 * This blocking function locks a local spinlock. 98 * If the lock is already taken, the calling thread deschedules and retries when99 * it is rescheduled, until success.104 * If the lock is already taken, the calling thread deschedules without blocking, 105 * and retries when it is rescheduled, until success. 100 106 * It increments the calling thread local_locks count when the lock has been taken. 101 107 *******************************************************************************************
Note: See TracChangeset
for help on using the changeset viewer.