Changeset 93 for trunk/kernel/libk/remote_spinlock.c

Timestamp: Jun 29, 2017, 12:46:00 PM
Files: 1 edited
trunk/kernel/libk/remote_spinlock.c
Diff from r60 to r93. The modified hunks are reproduced below as they read in r93; "…" marks lines elided by the changeset viewer.

/*
 * remote_spinlock.c - kernel remote spinlock implementation.
 *
 * Authors Mohamed Karaoui (2015)
 *         Alain Greiner (2016)
…

///////////////////////////////////////////
void remote_spinlock_init( xptr_t lock_xp )
{
    remote_spinlock_t * ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               cxy = GET_CXY( lock_xp );

    hal_remote_sw ( XPTR( cxy , &ptr->taken ) , 0 );
    hal_remote_swd( XPTR( cxy , &ptr->owner ) , XPTR_NULL );
    xlist_entry_init( XPTR( cxy , &ptr->list ) );
}

/////////////////////////////////////////////////
error_t remote_spinlock_trylock( xptr_t lock_xp )
{
    reg_t  mode;
    bool_t isAtomic = false;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get cluster and local pointer on local thread
    cxy_t      thread_cxy = local_cxy;
    thread_t * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    if( hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) ) == 0 )
    {
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    if( isAtomic == false )          // failure
    {
        hal_restore_irq( mode );
        return 1;
    }
    else                             // success : register lock in thread
    {
        thread_ptr->remote_locks++;

        hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                        (uint64_t)XPTR( thread_cxy , thread_ptr) );

        xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
                         XPTR( lock_cxy , &lock_ptr->list ) );

        hal_restore_irq(mode);
        return 0;
    }
}

…
void remote_spinlock_lock_busy( xptr_t     lock_xp,
                                uint32_t * irq_state )
{
    bool_t            isAtomic = false;
    reg_t             mode;
    volatile uint32_t taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get cluster and local pointer on local thread
    cxy_t      thread_cxy = local_cxy;
    thread_t * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // try to take the lock if not already taken
        if( taken == 0 )
        {
            isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
        }
    }

    // register lock in thread
    thread_ptr->remote_locks++;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                    (uint64_t)XPTR( thread_cxy , thread_ptr) );

    xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy , &lock_ptr->list ) );

    // irq_state must be restored when lock is released
    *irq_state = mode;

}  // end remote_spinlock_lock_busy()

…
void remote_spinlock_unlock_busy( xptr_t   lock_xp,
                                  uint32_t irq_state )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t * thread_ptr = CURRENT_THREAD;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
…
    thread_ptr->remote_locks--;

    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );

    hal_restore_irq( irq_state );
}

…
void remote_spinlock_lock( xptr_t lock_xp )
{
    bool_t            isAtomic = false;
    reg_t             mode;
    volatile uint32_t taken;

    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get cluster and local pointer on local thread
    cxy_t      thread_cxy = local_cxy;
    thread_t * thread_ptr = CURRENT_THREAD;

    // disable interrupts
    hal_disable_irq( &mode );

    // loop until success
    while( isAtomic == false )
    {
        taken = hal_remote_lw( XPTR( lock_cxy , &lock_ptr->taken ) );

        // deschedule if possible when lock already taken
        if( taken != 0 )
        {
            hal_restore_irq( mode );
            if( thread_can_yield() ) sched_yield();
            hal_disable_irq( &mode );
            continue;
        }

        // try to take the lock if not already taken
        isAtomic = hal_remote_atomic_cas( XPTR( lock_cxy , &lock_ptr->taken ) , 0 , 1 );
    }

    // register lock in thread
    thread_ptr->remote_locks++;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) ,
                    (uint64_t)XPTR( thread_cxy , thread_ptr) );

    xlist_add_first( XPTR( thread_cxy , &thread_ptr->xlocks_root ) ,
                     XPTR( lock_cxy , &lock_ptr->list ) );

    // enable interrupts
    hal_restore_irq( mode );
}

…
void remote_spinlock_unlock( xptr_t lock_xp )
{
    // get cluster and local pointer on remote_spinlock
    remote_spinlock_t * lock_ptr = (remote_spinlock_t *)GET_PTR( lock_xp );
    cxy_t               lock_cxy = GET_CXY( lock_xp );

    // get pointer on local thread
    thread_t * thread_ptr = CURRENT_THREAD;

    hal_remote_swd( XPTR( lock_cxy , &lock_ptr->owner ) , XPTR_NULL );
…
    thread_ptr->remote_locks--;

    xlist_unlink( XPTR( lock_cxy , &lock_ptr->list ) );
}
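
To show how these primitives are meant to be combined, here is a minimal usage sketch. It is not part of the changeset: the remote_spinlock_* functions, the hal_remote_lw()/hal_remote_sw() accessors and the XPTR/GET_CXY/GET_PTR macros are the ones used above, but the shared_counter_t structure, the helper functions and the header names are hypothetical, introduced only for illustration. The busy-waiting pair is used because remote_spinlock_lock_busy() keeps interrupts disabled for the whole critical section (it saves the IRQ state into *irq_state), and remote_spinlock_unlock_busy() restores that state on release; remote_spinlock_lock() instead re-enables interrupts before returning and may yield the processor while waiting.

// Hypothetical illustration only - not part of remote_spinlock.c.
// Header names below are assumed; adjust to the actual kernel tree.
#include <hal_remote.h>
#include <remote_spinlock.h>

// hypothetical shared object: it lives in one cluster and is accessed
// from any cluster through an extended pointer (xptr_t)
typedef struct shared_counter_s
{
    remote_spinlock_t lock;       // protects <value>
    uint32_t          value;
}
shared_counter_t;

// hypothetical one-time initialisation of the remote object
void shared_counter_init( xptr_t counter_xp )
{
    cxy_t              cxy = GET_CXY( counter_xp );
    shared_counter_t * ptr = (shared_counter_t *)GET_PTR( counter_xp );

    remote_spinlock_init( XPTR( cxy , &ptr->lock ) );
    hal_remote_sw( XPTR( cxy , &ptr->value ) , 0 );
}

// hypothetical helper: increment a counter located in a (possibly remote)
// cluster, using the busy-waiting lock variant
void shared_counter_incr( xptr_t counter_xp )
{
    uint32_t irq_state;

    // get cluster and local pointer on the remote structure
    cxy_t              cxy = GET_CXY( counter_xp );
    shared_counter_t * ptr = (shared_counter_t *)GET_PTR( counter_xp );

    // build an extended pointer on the embedded lock
    xptr_t lock_xp = XPTR( cxy , &ptr->lock );

    // enter critical section: IRQs stay disabled until unlock_busy()
    remote_spinlock_lock_busy( lock_xp , &irq_state );

    // read-modify-write is safe here because the lock is held
    uint32_t value = hal_remote_lw( XPTR( cxy , &ptr->value ) );
    hal_remote_sw( XPTR( cxy , &ptr->value ) , value + 1 );

    // leave critical section and restore the saved IRQ state
    remote_spinlock_unlock_busy( lock_xp , irq_state );
}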
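
A second sketch, equally hypothetical, shows the non-blocking variant: remote_spinlock_trylock() returns 0 on success and 1 on failure, and a successful acquisition is released with remote_spinlock_unlock() (interrupts are already re-enabled when trylock returns, so no IRQ state has to be carried by the caller).

// Hypothetical illustration only - continues the shared_counter_t example above.
// Returns 0 if the counter was updated, 1 if the lock could not be taken.
error_t shared_counter_try_incr( xptr_t counter_xp )
{
    cxy_t              cxy     = GET_CXY( counter_xp );
    shared_counter_t * ptr     = (shared_counter_t *)GET_PTR( counter_xp );
    xptr_t             lock_xp = XPTR( cxy , &ptr->lock );

    // non-blocking attempt: give up immediately if the lock is already taken
    if( remote_spinlock_trylock( lock_xp ) ) return 1;

    uint32_t value = hal_remote_lw( XPTR( cxy , &ptr->value ) );
    hal_remote_sw( XPTR( cxy , &ptr->value ) , value + 1 );

    // release: unlike the busy variant, no IRQ state is passed back
    remote_spinlock_unlock( lock_xp );
    return 0;
}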