Changeset 331
- Timestamp:
- Aug 7, 2017, 10:06:03 AM (7 years ago)
- File:
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/kernel/libk/spinlock.c
r296 r331 1 1 /* 2 2 * spinlock.c - kernel spinlock synchronization 3 * 3 * 4 4 * Authors Ghassan Almaless (2008,2009,2010,2011,2012) 5 5 * Alain Greiner (2016} … … 35 35 ////////////////////////////////////////////// 36 36 inline void spinlock_init( spinlock_t * lock ) 37 { 38 39 37 { 38 lock->taken = 0; 39 lock->owner = NULL; 40 40 list_entry_init( &lock->list ); 41 41 } 42 42 43 43 /////////////////////////////////////////// 44 void spinlock_lock_busy( spinlock_t * lock, 44 void spinlock_lock_busy( spinlock_t * lock, 45 45 uint32_t * irq_state ) 46 46 { 47 48 49 50 47 reg_t mode; 48 volatile uint32_t taken; 49 thread_t * this = CURRENT_THREAD; 50 bool_t isAtomic = false; 51 51 52 52 // disable interrupts 53 54 53 hal_disable_irq( &mode ); 54 55 55 // loop until success 56 57 58 56 while( isAtomic == false ) 57 { 58 taken = lock->taken; 59 59 60 60 // try to take the lock if not already taken 61 61 if( taken == 0 ) 62 62 { 63 63 isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 ); 64 64 } 65 65 } 66 66 67 67 this->local_locks++; 68 68 lock->owner = this; 69 69 list_add_first( &this->locks_root , &lock->list ); 70 70 71 // irq_state must be restored when lock is released 71 // irq_state must be restored when lock is released 72 72 *irq_state = mode; 73 73 } … … 77 77 uint32_t irq_state ) 78 78 { 79 80 79 thread_t * this = CURRENT_THREAD;; 80 81 81 lock->owner = NULL; 82 82 lock->taken = 0; 83 83 this->local_locks--; 84 84 list_unlink( &lock->list ); 85 86 85 86 hal_restore_irq( irq_state ); 87 87 } 88 88 89 89 /////////////////////////////////////// 90 90 void spinlock_lock( spinlock_t * lock ) 91 91 { 92 93 94 95 96 92 reg_t mode; 93 thread_t * this = CURRENT_THREAD; 94 bool_t isAtomic = false; 95 volatile uint32_t taken; 96 97 97 // disable interrupts 98 99 98 hal_disable_irq( &mode ); 99 100 100 // loop until success 101 102 101 while( isAtomic == false ) 102 { 103 103 taken = lock->taken; 104 104 105 105 // deschedule without blocking when lock already taken 106 
106 if( taken != 0 ) 107 107 { 108 108 hal_restore_irq( mode ); … … 113 113 114 114 // try to atomically take the lock if not already taken 115 115 isAtomic = hal_atomic_cas( &lock->taken , 0 , 1 ); 116 116 } 117 117 118 118 this->local_locks++; 119 119 lock->owner = this; 120 120 list_add_first( &this->locks_root , &lock->list ); … … 126 126 ///////////////////////////////////////////// 127 127 error_t spinlock_trylock( spinlock_t * lock ) 128 { 129 130 131 128 { 129 reg_t mode; 130 bool_t isAtomic = false; 131 thread_t * this = CURRENT_THREAD; 132 132 133 133 hal_disable_irq( &mode ); 134 134 135 136 137 138 139 140 141 142 135 if( lock->taken == 0) 136 isAtomic = hal_atomic_cas( &lock->taken , 0 , 1); 137 138 if(isAtomic == false) 139 { 140 hal_restore_irq(mode); 141 return 1; 142 } 143 143 else 144 144 { 145 146 145 this->local_locks++; 146 lock->owner = this; 147 147 list_add_first( &this->locks_root , &lock->list ); 148 hal_restore_irq(mode); 149 148 hal_restore_irq(mode); 149 return 0; 150 150 } 151 151 } … … 154 154 void spinlock_unlock( spinlock_t * lock ) 155 155 { 156 157 156 thread_t * this = CURRENT_THREAD; 157 158 158 lock->owner = NULL; 159 159 lock->taken = 0; … … 162 162 } 163 163 164
Note: See TracChangeset
for help on using the changeset viewer.