Changeset 104 for trunk/kernel/libk
Timestamp: Jun 30, 2017, 8:57:37 AM (8 years ago)
Files: 1 edited
Legend:
- Unmodified
- Added
- Removed
trunk/kernel/libk/remote_barrier.c
r60 r104 1 1 /* 2 2 * remote_barrier.c - Access a POSIX barrier. 3 * 3 * 4 4 * Author Alain Greiner (2016,2017) 5 5 * … … 35 35 ///////////////////////////////////////////////// 36 36 inline void remote_barrier( xptr_t barrier_xp, 37 uint32_t count ) 37 uint32_t count ) 38 38 { 39 39 uint32_t expected; … … 45 45 uint32_t sense = hal_remote_lw( XPTR( cxy , &ptr->sense ) ); 46 46 47 47 // compute expected value 48 48 if ( sense == 0 ) expected = 1; 49 49 else expected = 0; 50 50 51 // atomically increment current 51 // atomically increment current 52 52 uint32_t current = hal_remote_atomic_add( XPTR( cxy , &ptr->current ) , 1 ); 53 53 54 54 // last task reset current and toggle sense 55 if( current == (count-1) ) 56 { 57 hal_remote_sw( XPTR( cxy , &ptr->current) , 0 ); 58 hal_remote_sw( XPTR( cxy , &ptr->sense ) , expected ); 59 } 60 else // other tasks poll the sense 55 if( current == (count-1) ) 56 { 57 hal_remote_sw( XPTR( cxy , &ptr->current) , 0 ); 58 hal_remote_sw( XPTR( cxy , &ptr->sense ) , expected ); 59 } 60 else // other tasks poll the sense 61 61 { 62 62 while( hal_remote_lw( XPTR( cxy , &ptr->sense ) ) != expected ) asm volatile ("nop"); … … 73 73 xptr_t ref_xp = process->ref_xp; 74 74 75 // get cluster and local pointer on reference process 75 // get cluster and local pointer on reference process 76 76 cxy_t ref_cxy = GET_CXY( ref_xp ); 77 77 process_t * ref_ptr = (process_t *)GET_PTR( ref_xp ); 78 78 79 // get extended pointer on root of barriers list 79 // get extended pointer on root of barriers list 80 80 xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->barrier_root ); 81 81 82 82 // scan reference process barriers list 83 83 xptr_t iter_xp; … … 87 87 intptr_t current; 88 88 bool_t found = false; 89 89 90 90 XLIST_FOREACH( root_xp , iter_xp ) 91 91 { … … 93 93 barrier_cxy = GET_CXY( barrier_xp ); 94 94 barrier_ptr = (remote_barrier_t *)GET_PTR( barrier_xp ); 95 current = (intptr_t)hal_remote_lpt( XPTR( barrier_cxy , &barrier_ptr->ident ) ); 95 current = 
(intptr_t)hal_remote_lpt( XPTR( barrier_cxy , &barrier_ptr->ident ) ); 96 96 if( ident == current ) 97 97 { … … 103 103 if( found == false ) return XPTR_NULL; 104 104 else return barrier_xp; 105 106 } // end remote_barrier_from_ident() 105 } 107 106 108 107 ////////////////////////////////////////////// … … 124 123 125 124 // allocate memory for barrier descriptor 126 if( ref_cxy == local_cxy ) // local cluster is the reference 127 { 128 kmem_req_t req; 125 if( ref_cxy == local_cxy ) // local cluster is the reference 126 { 127 kmem_req_t req; 129 128 req.type = KMEM_BARRIER; 130 129 req.flags = AF_ZERO; … … 140 139 if( barrier_ptr == NULL ) return ENOMEM; 141 140 142 // initialise barrier 141 // initialise barrier 143 142 hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->nb_threads ) , count ); 144 143 hal_remote_sw ( XPTR( ref_cxy , &barrier_ptr->current ) , 0 ); … … 157 156 158 157 return 0; 159 160 } // end remote_barrier_create() 158 } 161 159 162 160 //////////////////////////////////////////////// … … 194 192 rpc_kcm_free_client( barrier_cxy , barrier_ptr , KMEM_BARRIER ); 195 193 } 196 197 } // end remote_barrier_destroy() 194 } 198 195 199 196 ///////////////////////////////////////////// … … 222 219 sense = hal_remote_lw( XPTR( barrier_cxy , &barrier_ptr->sense ) ); 223 220 224 221 // compute expected value 225 222 if ( sense == 0 ) expected = 1; 226 223 else expected = 0; 227 224 228 // atomically increment current 225 // atomically increment current 229 226 current = hal_remote_atomic_add( XPTR( barrier_cxy , &barrier_ptr->current ) , 1 ); 230 227 231 228 // last thread reset current, toggle sense, and activate all waiting threads 232 // other threads block, register in queue, and deschedule 229 // other threads block, register in queue, and deschedule 233 230 234 231 if( current == (count-1) ) // last thread 235 232 { 236 hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->current) , 0 ); 237 hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->sense ) , expected 
); 233 hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->current) , 0 ); 234 hal_remote_sw( XPTR( barrier_cxy , &barrier_ptr->sense ) , expected ); 238 235 239 236 // activate waiting threads if required 240 if( xlist_is_empty( root_xp ) == false ) 237 if( xlist_is_empty( root_xp ) == false ) 241 238 { 242 239 // disable interrupts 243 244 240 hal_disable_irq( &irq_state ); 241 245 242 xptr_t iter_xp; 246 243 xptr_t thread_xp; … … 249 246 // get extended pointer on waiting thread 250 247 thread_xp = XLIST_ELEMENT( iter_xp , thread_t , wait_list ); 251 248 252 249 // remove waiting thread from queue 253 250 remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) ); … … 256 253 257 254 // unblock waiting thread 258 thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC ); 255 thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC ); 259 256 } 260 257 … … 263 260 } 264 261 } 265 else // not the last thread 262 else // not the last thread 266 263 { 267 264 // disable interrupts 268 269 265 hal_disable_irq( &irq_state ); 266 270 267 // register calling thread in barrier waiting queue 271 268 xptr_t entry_xp = XPTR( thread_cxy , &thread_ptr->wait_list ); 272 269 273 remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) ); 270 remote_spinlock_lock( XPTR( barrier_cxy , &barrier_ptr->lock ) ); 274 271 xlist_add_last( root_xp , entry_xp ); 275 remote_spinlock_unlock( XPTR( barrier_cxy , &barrier_ptr->lock ) ); 276 277 // block & deschedule the calling thread 272 remote_spinlock_unlock( XPTR( barrier_cxy , &barrier_ptr->lock ) ); 273 274 // block & deschedule the calling thread 278 275 thread_block( thread_ptr , THREAD_BLOCKED_USERSYNC ); 279 276 sched_yield(); … … 282 279 hal_restore_irq( irq_state ); 283 280 } 284 } // end remote_barrier_wait()281 }
Note: See TracChangeset for help on using the changeset viewer.