/*
 * remote_barrier.c - POSIX barrier implementation.
 *
 * Author  Alain Greiner (2016,2017,2018,2019)
 *
 * Copyright (c) UPMC Sorbonne Universites
 *
 * This file is part of ALMOS-MKH.
 *
 * ALMOS-MKH is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2.0 of the License.
 *
 * ALMOS-MKH is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with ALMOS-MKH; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <hal_kernel_types.h>
#include <hal_macros.h>
#include <hal_remote.h>
#include <hal_irqmask.h>
#include <remote_busylock.h>
#include <thread.h>
#include <kmem.h>
#include <printk.h>
#include <process.h>
#include <vmm.h>
#include <remote_barrier.h>

////////////////////////////////////////////////////
//  generic (implementation independent) functions
////////////////////////////////////////////////////

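///////////////////////////////////////////////////////////////////////////////////////
// Typical calling sequence (illustrative sketch only; in ALMOS-MKH these functions
// are expected to be driven by the pthread_barrier related syscalls, which is an
// assumption and is not shown in this file):
//
//    // register a barrier for <count> threads in the reference process
//    error_t err = generic_barrier_create( ident , count , attr );
//
//    // each participating thread retrieves the descriptor and synchronises on it
//    xptr_t barrier_xp = generic_barrier_from_ident( ident );
//    if( barrier_xp != XPTR_NULL ) generic_barrier_wait( barrier_xp );
//
//    // when the barrier is no longer used, it is unregistered and released
//    generic_barrier_destroy( barrier_xp );
///////////////////////////////////////////////////////////////////////////////////////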
///////////////////////////////////////////////////
xptr_t generic_barrier_from_ident( intptr_t  ident )
{
    // get pointer on local process_descriptor
    process_t * process = CURRENT_THREAD->process;

    // get pointers on reference process
    xptr_t      ref_xp  = process->ref_xp;
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // get extended pointer on root of barriers list
    xptr_t root_xp = XPTR( ref_cxy , &ref_ptr->barrier_root );

    // scan reference process barriers list
    xptr_t              iter_xp;
    xptr_t              barrier_xp;
    cxy_t               barrier_cxy;
    generic_barrier_t * barrier_ptr;
    intptr_t            current;
    bool_t              found = false;

    XLIST_FOREACH( root_xp , iter_xp )
    {
        barrier_xp  = XLIST_ELEMENT( iter_xp , generic_barrier_t , list );
        barrier_cxy = GET_CXY( barrier_xp );
        barrier_ptr = (generic_barrier_t *)GET_PTR( barrier_xp );
        current     = (intptr_t)hal_remote_lpt( XPTR( barrier_cxy , &barrier_ptr->ident ) );
        if( ident == current )
        {
            found = true;
            break;
        }
    }

    if( found == false )  return XPTR_NULL;
    else                  return barrier_xp;

}  // end generic_barrier_from_ident()
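// Note on the <ident> argument: it is simply the value registered by
// generic_barrier_create(); it is assumed to be the user-space virtual address of
// the POSIX barrier object, but the functions in this file only use it as an
// opaque identifier.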

//////////////////////////////////////////////////////////////
error_t generic_barrier_create( intptr_t                ident,
                                uint32_t                count,
                                pthread_barrierattr_t * attr )
{
    xptr_t              gen_barrier_xp;   // extended pointer on generic barrier descriptor
    generic_barrier_t * gen_barrier_ptr;  // local pointer on generic barrier descriptor
    void              * barrier;          // local pointer on implementation barrier descriptor
    kmem_req_t          req;              // kmem request

    // get pointer on local process_descriptor
    process_t * process = CURRENT_THREAD->process;

    // get pointers on reference process
    xptr_t      ref_xp  = process->ref_xp;
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = (process_t *)GET_PTR( ref_xp );

    // allocate memory for generic barrier descriptor
    if( ref_cxy == local_cxy )                        // reference cluster is local
    {
        req.type        = KMEM_GEN_BARRIER;
        req.flags       = AF_ZERO;
        gen_barrier_ptr = kmem_alloc( &req );
        gen_barrier_xp  = XPTR( local_cxy , gen_barrier_ptr );
    }
    else                                              // reference cluster is remote
    {
        rpc_kcm_alloc_client( ref_cxy,
                              KMEM_GEN_BARRIER,
                              &gen_barrier_xp );
        gen_barrier_ptr = GET_PTR( gen_barrier_xp );
    }

    if( gen_barrier_ptr == NULL )
    {
        printk("\n[ERROR] in %s : cannot create generic barrier\n", __FUNCTION__ );
        return -1;
    }

    // create implementation specific barrier descriptor
    if( attr == NULL )                                // simple barrier implementation
    {
        // create simple barrier descriptor
        barrier = simple_barrier_create( count );

        if( barrier == NULL )
        {
            printk("\n[ERROR] in %s : cannot create simple barrier\n", __FUNCTION__);
            return -1;
        }
    }
    else                                              // DQT barrier implementation
    {
        uint32_t x_size   = attr->x_size;
        uint32_t y_size   = attr->y_size;
        uint32_t nthreads = attr->nthreads;

        // check attributes / count
        if( (x_size * y_size * nthreads) != count )
        {
            printk("\n[ERROR] in %s : count(%d) != x_size(%d) * y_size(%d) * nthreads(%d)\n",
            __FUNCTION__, count, x_size, y_size, nthreads );
            return -1;
        }

        // create DQT barrier descriptor
        barrier = dqt_barrier_create( x_size , y_size , nthreads );

        if( barrier == NULL )
        {
            printk("\n[ERROR] in %s : cannot create DQT barrier descriptor\n", __FUNCTION__);
            return -1;
        }
    }

    // initialize the generic barrier descriptor
    hal_remote_spt( XPTR( ref_cxy , &gen_barrier_ptr->ident  ) , (void*)ident );
    hal_remote_s32( XPTR( ref_cxy , &gen_barrier_ptr->is_dqt ) , (attr != NULL) );
    hal_remote_spt( XPTR( ref_cxy , &gen_barrier_ptr->extend ) , barrier );

    // build extended pointers on lock, root and entry for reference process xlist
    xptr_t root_xp  = XPTR( ref_cxy , &ref_ptr->barrier_root );
    xptr_t lock_xp  = XPTR( ref_cxy , &ref_ptr->sync_lock );
    xptr_t entry_xp = XPTR( ref_cxy , &gen_barrier_ptr->list );

    // register barrier in reference process xlist of barriers
    remote_busylock_acquire( lock_xp );
    xlist_add_first( root_xp , entry_xp );
    remote_busylock_release( lock_xp );

    return 0;

}  // end generic_barrier_create()

/////////////////////////////////////////////////////
void generic_barrier_destroy( xptr_t gen_barrier_xp )
{
    kmem_req_t  req;              // kmem request

    // get pointer on local process_descriptor
    process_t * process = CURRENT_THREAD->process;

    // get pointers on reference process
    xptr_t      ref_xp  = process->ref_xp;
    cxy_t       ref_cxy = GET_CXY( ref_xp );
    process_t * ref_ptr = GET_PTR( ref_xp );

    // get cluster and local pointer on generic barrier descriptor
    generic_barrier_t * gen_barrier_ptr = GET_PTR( gen_barrier_xp );
    cxy_t               gen_barrier_cxy = GET_CXY( gen_barrier_xp );

    // get barrier type and extension pointer
    bool_t  is_dqt = hal_remote_l32( XPTR( gen_barrier_cxy , &gen_barrier_ptr->is_dqt ) );
    void  * extend = hal_remote_lpt( XPTR( gen_barrier_cxy , &gen_barrier_ptr->extend ) );

    // build extended pointer on implementation dependent barrier descriptor
    xptr_t barrier_xp = XPTR( gen_barrier_cxy , extend );

    // delete the implementation specific barrier
    if( is_dqt ) dqt_barrier_destroy( barrier_xp );
    else         simple_barrier_destroy( barrier_xp );

    // build extended pointers on lock and entry for reference process xlist
    xptr_t lock_xp  = XPTR( ref_cxy , &ref_ptr->sync_lock );
    xptr_t entry_xp = XPTR( gen_barrier_cxy , &gen_barrier_ptr->list );

    // remove barrier from reference process xlist
    remote_busylock_acquire( lock_xp );
    xlist_unlink( entry_xp );
    remote_busylock_release( lock_xp );

    // release memory allocated to barrier descriptor
    if( gen_barrier_cxy == local_cxy )
    {
        req.type = KMEM_GEN_BARRIER;
        req.ptr  = gen_barrier_ptr;
        kmem_free( &req );
    }
    else
    {
        rpc_kcm_free_client( gen_barrier_cxy,
                             gen_barrier_ptr,
                             KMEM_GEN_BARRIER );
    }
}  // end generic_barrier_destroy()

//////////////////////////////////////////////////
void generic_barrier_wait( xptr_t gen_barrier_xp )
{
    // get generic barrier descriptor cluster and pointer
    cxy_t               gen_barrier_cxy = GET_CXY( gen_barrier_xp );
    generic_barrier_t * gen_barrier_ptr = GET_PTR( gen_barrier_xp );

    // get implementation type and extend local pointer
    bool_t  is_dqt = hal_remote_l32( XPTR( gen_barrier_cxy , &gen_barrier_ptr->is_dqt ) );
    void  * extend = hal_remote_lpt( XPTR( gen_barrier_cxy , &gen_barrier_ptr->extend ) );

    // build extended pointer on implementation specific barrier descriptor
    xptr_t barrier_xp = XPTR( gen_barrier_cxy , extend );

    // call the relevant wait function
    if( is_dqt ) dqt_barrier_wait( barrier_xp );
    else         simple_barrier_wait( barrier_xp );

}  // end generic_barrier_wait()

/////////////////////////////////////////////////////
void generic_barrier_display( xptr_t gen_barrier_xp )
{
    // get cluster and local pointer
    generic_barrier_t * gen_barrier_ptr = GET_PTR( gen_barrier_xp );
    cxy_t               gen_barrier_cxy = GET_CXY( gen_barrier_xp );

    // get barrier type and extend pointer
    bool_t  is_dqt = hal_remote_l32( XPTR( gen_barrier_cxy , &gen_barrier_ptr->is_dqt ) );
    void  * extend = hal_remote_lpt( XPTR( gen_barrier_cxy , &gen_barrier_ptr->extend ) );

    // build extended pointer on the implementation specific barrier descriptor
    xptr_t barrier_xp = XPTR( gen_barrier_cxy , extend );

    // display barrier state
    if( is_dqt ) dqt_barrier_display( barrier_xp );
    else         simple_barrier_display( barrier_xp );
}


/////////////////////////////////////////////////////////////
//      simple barrier functions
/////////////////////////////////////////////////////////////

///////////////////////////////////////////////////////////
simple_barrier_t * simple_barrier_create( uint32_t  count )
{
    xptr_t             barrier_xp;
    simple_barrier_t * barrier;

    // get pointer on local client process descriptor
    thread_t  * this    = CURRENT_THREAD;
    process_t * process = this->process;

    // get reference process cluster
    xptr_t ref_xp  = process->ref_xp;
    cxy_t  ref_cxy = GET_CXY( ref_xp );

    // allocate memory for simple barrier descriptor
    if( ref_cxy == local_cxy )                        // reference is local
    {
        kmem_req_t req;
        req.type   = KMEM_SMP_BARRIER;
        req.flags  = AF_ZERO;
        barrier    = kmem_alloc( &req );
        barrier_xp = XPTR( local_cxy , barrier );
    }
    else                                              // reference is remote
    {
        rpc_kcm_alloc_client( ref_cxy,
                              KMEM_SMP_BARRIER,
                              &barrier_xp );
        barrier = GET_PTR( barrier_xp );
    }

    if( barrier == NULL ) return NULL;

    // initialise simple barrier descriptor
    hal_remote_s32( XPTR( ref_cxy , &barrier->arity   ) , count );
    hal_remote_s32( XPTR( ref_cxy , &barrier->current ) , 0 );
    hal_remote_s32( XPTR( ref_cxy , &barrier->sense   ) , 0 );

    xlist_root_init     ( XPTR( ref_cxy , &barrier->root ) );
    remote_busylock_init( XPTR( ref_cxy , &barrier->lock ) , LOCK_BARRIER_STATE );

#if DEBUG_BARRIER_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] created barrier (%x,%x) / count %d / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, ref_cxy, barrier, count, cycle );
#endif

    return barrier;

}  // end simple_barrier_create()

////////////////////////////////////////////////
void simple_barrier_destroy( xptr_t barrier_xp )
{
    // get barrier cluster and local pointer
    cxy_t              barrier_cxy = GET_CXY( barrier_xp );
    simple_barrier_t * barrier_ptr = GET_PTR( barrier_xp );

    // release memory allocated for barrier descriptor
    if( barrier_cxy == local_cxy )
    {
        kmem_req_t  req;
        req.type = KMEM_SMP_BARRIER;
        req.ptr  = barrier_ptr;
        kmem_free( &req );
    }
    else
    {
        rpc_kcm_free_client( barrier_cxy,
                             barrier_ptr,
                             KMEM_SMP_BARRIER );
    }

#if DEBUG_BARRIER_DESTROY
uint32_t    cycle   = (uint32_t)hal_get_cycles();
thread_t  * this    = CURRENT_THREAD;
process_t * process = this->process;
if( cycle > DEBUG_BARRIER_DESTROY )
printk("\n[%s] thread[%x,%x] deleted barrier (%x,%x) / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

}  // end simple_barrier_destroy()
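// The wait function below implements a "sense reversal" barrier: the <sense> field
// toggles between 0 and 1 at each barrier crossing, so the same descriptor can be
// safely reused for successive synchronisation rounds. Worked example with arity == 3:
// the first two arriving threads read current == 0 and 1, register in the waiting
// queue, block and deschedule; the third thread reads current == 2 (arity - 1),
// resets current to 0, toggles sense, and unblocks the two registered threads.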
/////////////////////////////////////////////
void simple_barrier_wait( xptr_t barrier_xp )
{
    uint32_t  expected;
    uint32_t  sense;
    uint32_t  current;
    uint32_t  arity;
    xptr_t    root_xp;
    xptr_t    lock_xp;
    xptr_t    current_xp;
    xptr_t    sense_xp;
    xptr_t    arity_xp;

    // get pointer on calling thread
    thread_t * this = CURRENT_THREAD;

    // check calling thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // get cluster and local pointer on remote barrier
    simple_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
    cxy_t              barrier_cxy = GET_CXY( barrier_xp );

#if DEBUG_BARRIER_WAIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] enter / barrier (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

    // build extended pointers on various barrier descriptor fields
    lock_xp    = XPTR( barrier_cxy , &barrier_ptr->lock );
    root_xp    = XPTR( barrier_cxy , &barrier_ptr->root );
    current_xp = XPTR( barrier_cxy , &barrier_ptr->current );
    sense_xp   = XPTR( barrier_cxy , &barrier_ptr->sense );
    arity_xp   = XPTR( barrier_cxy , &barrier_ptr->arity );

    // take busylock protecting the barrier state
    remote_busylock_acquire( lock_xp );

    // get sense and arity values from barrier descriptor
    sense = hal_remote_l32( sense_xp );
    arity = hal_remote_l32( arity_xp );

    // compute expected value
    if ( sense == 0 ) expected = 1;
    else              expected = 0;

    // increment current number of arrived threads / get value before increment
    current = hal_remote_atomic_add( current_xp , 1 );

    // the last thread resets current, toggles sense, and activates all waiting threads
    // other threads block, register in queue, and deschedule

    if( current == (arity - 1) )                       // last thread
    {
        hal_remote_s32( current_xp , 0 );
        hal_remote_s32( sense_xp , expected );

        // unblock all waiting threads
        while( xlist_is_empty( root_xp ) == false )
        {
            // get pointers on first waiting thread
            xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
            cxy_t      thread_cxy = GET_CXY( thread_xp );
            thread_t * thread_ptr = GET_PTR( thread_xp );

#if (DEBUG_BARRIER_WAIT & 1)
trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] unblocks thread[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, pid, trdid );
#endif

            // remove waiting thread from queue
            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );

            // unblock waiting thread
            thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
        }

        // release busylock protecting the barrier
        remote_busylock_release( lock_xp );
    }
    else                                               // not the last thread
    {

#if (DEBUG_BARRIER_WAIT & 1)
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] blocks\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

        // register calling thread in barrier waiting queue
        xlist_add_last( root_xp , XPTR( local_cxy , &this->wait_list ) );

        // block calling thread
        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );

        // release busylock protecting the remote_barrier
        remote_busylock_release( lock_xp );

        // deschedule
        sched_yield("blocked on barrier");
    }

#if DEBUG_BARRIER_WAIT
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] exit / barrier (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

}  // end simple_barrier_wait()

/////////////////////////////////////////////////
void simple_barrier_display( xptr_t barrier_xp )
{
    // get cluster and local pointer on simple barrier
    simple_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
    cxy_t              barrier_cxy = GET_CXY( barrier_xp );

    // get barrier global parameters
    uint32_t current = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->current ) );
    uint32_t arity   = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->arity   ) );

    printk("\n***** simple barrier : %d arrived threads on %d *****\n",
    current, arity );

}  // end simple_barrier_display()



/////////////////////////////////////////////////////////////
//      DQT barrier functions
/////////////////////////////////////////////////////////////

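// The DQT barrier distributes the synchronisation over a quad tree (DQT is assumed
// here to stand for "Distributed Quad Tree") mapped on the 2D mesh of clusters:
// each cluster holds a bottom node (level 0) whose arity is the number of threads
// running in that cluster, and a node at level l > 0 exists only in clusters whose
// (x,y) coordinates are multiples of 2^l; its children are the (up to four) level
// (l-1) nodes of the covered 2^l x 2^l macro-cluster. The number of levels computed
// below is ceil(log2(max(x_size,y_size))) + 1.
//
// Worked example (derived from the code below): for a 4x4 mesh with nthreads == 2,
// z == 4 gives levels == 3; every cluster holds a level-0 node with arity 2; the
// clusters with even coordinates hold a level-1 node with up to 4 children; and
// cluster (0,0) holds the single level-2 root node.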
static void dqt_barrier_increment( xptr_t node_xp );

#if DEBUG_BARRIER_CREATE
static void dqt_barrier_display( xptr_t  barrier_xp );
#endif

///////////////////////////////////////////////////////
dqt_barrier_t * dqt_barrier_create( uint32_t    x_size,
                                    uint32_t    y_size,
                                    uint32_t    nthreads )
{
    xptr_t          dqt_page_xp;
    page_t        * rpc_page;
    xptr_t          rpc_page_xp;
    dqt_barrier_t * barrier;       // local pointer on DQT barrier descriptor
    xptr_t          barrier_xp;    // extended pointer on DQT barrier descriptor
    uint32_t        z;             // actual DQT size == max(x_size,y_size)
    uint32_t        levels;        // actual number of DQT levels
    xptr_t          rpc_xp;        // extended pointer on RPC descriptors array
    rpc_desc_t    * rpc;           // pointer on RPC descriptors array
    uint32_t        responses;     // responses counter for parallel RPCs
    reg_t           save_sr;       // for critical section
    uint32_t        x;             // X coordinate in DQT mesh
    uint32_t        y;             // Y coordinate in DQT mesh
    uint32_t        l;             // level coordinate
    kmem_req_t      req;           // kmem request

    // compute size and number of DQT levels
    z      = (x_size > y_size) ? x_size : y_size;
    levels = (z < 2) ? 1 : (z < 3) ? 2 : (z < 5) ? 3 : (z < 9) ? 4 : 5;

    // check x_size and y_size arguments
    assert( (z <= 16) , "DQT mesh size larger than (16*16)\n");

    // check RPC descriptor size
    assert( (sizeof(rpc_desc_t) <= 128), "RPC descriptor larger than 128 bytes\n");

    // check size of an array of 5 DQT nodes
    assert( (sizeof(dqt_node_t) * 5 <= 512 ), "array of DQT nodes larger than 512 bytes\n");

    // check size of DQT barrier descriptor
    assert( (sizeof(dqt_barrier_t) <= 0x4000 ), "DQT barrier descriptor larger than 4 pages\n");

    // get pointer on local client process descriptor
    thread_t  * this    = CURRENT_THREAD;
    process_t * process = this->process;

#if DEBUG_BARRIER_CREATE
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] enter : x_size %d / y_size %d / levels %d / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, x_size, y_size, levels, cycle );
#endif

    // get reference process cluster
    xptr_t ref_xp  = process->ref_xp;
    cxy_t  ref_cxy = GET_CXY( ref_xp );

    // 1. allocate four 4-Kbyte pages for the DQT barrier descriptor in reference cluster
    dqt_page_xp = ppm_remote_alloc_pages( ref_cxy , 2 );

    if( dqt_page_xp == XPTR_NULL ) return NULL;

    // get pointers on DQT barrier descriptor
    barrier_xp = ppm_page2base( dqt_page_xp );
    barrier    = GET_PTR( barrier_xp );

    // initialize global parameters in DQT barrier descriptor
    hal_remote_s32( XPTR( ref_cxy , &barrier->x_size   ) , x_size );
    hal_remote_s32( XPTR( ref_cxy , &barrier->y_size   ) , y_size );
    hal_remote_s32( XPTR( ref_cxy , &barrier->nthreads ) , nthreads );

#if DEBUG_BARRIER_CREATE
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] created DQT barrier descriptor at (%x,%x)\n",
__FUNCTION__, process->pid, this->trdid, ref_cxy, barrier );
#endif

    // 2. allocate memory from local cluster for an array of 256 RPC descriptors
    //    cannot share the RPC descriptor, because the returned argument is not shared
    req.type  = KMEM_PAGE;
    req.size  = 3;            // 8 pages == 32 Kbytes
    req.flags = AF_ZERO;
    rpc_page    = kmem_alloc( &req );
    rpc_page_xp = XPTR( local_cxy , rpc_page );

    // get pointers on RPC descriptors array
    rpc_xp = ppm_page2base( rpc_page_xp );
    rpc    = GET_PTR( rpc_xp );

#if DEBUG_BARRIER_CREATE
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] created RPC descriptors array at (%x,%x)\n",
__FUNCTION__, process->pid, this->trdid, local_cxy, rpc );
#endif

    // 3. send parallel RPCs to all existing clusters covered by the DQT
    //    to allocate memory for an array of 5 DQT nodes in each cluster
    //    (5 nodes per cluster <= 512 bytes per cluster)
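    //    Note: the rpc[] array below is indexed by the target cluster identifier cxy,
    //    assuming HAL_CXY_FROM_XY() packs the (x,y) coordinates in 4 bits each; with
    //    the (16*16) mesh limit checked above this gives at most 256 descriptors of
    //    at most 128 bytes each, which fit in the 8 allocated pages (32 Kbytes).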

    responses = 0;    // initialize RPC responses counter

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    for ( x = 0 ; x < x_size ; x++ )
    {
        for ( y = 0 ; y < y_size ; y++ )
        {
            // send RPC to existing clusters only
            if( LOCAL_CLUSTER->cluster_info[x][y] )
            {
                cxy_t cxy = HAL_CXY_FROM_XY( x , y );   // target cluster identifier

                // build a specific RPC descriptor for each target cluster
                rpc[cxy].rsp      = &responses;
                rpc[cxy].blocking = false;
                rpc[cxy].index    = RPC_KCM_ALLOC;
                rpc[cxy].thread   = this;
                rpc[cxy].lid      = this->core->lid;
                rpc[cxy].args[0]  = (uint64_t)KMEM_512_BYTES;

                // atomically increment expected responses counter
                hal_atomic_add( &responses , 1 );

                // send a non-blocking RPC to allocate 512 bytes in target cluster
                rpc_send( cxy , &rpc[cxy] );
            }
        }
    }

#if DEBUG_BARRIER_CREATE
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] sent all RPC requests to allocate dqt_nodes array\n",
__FUNCTION__, process->pid, this->trdid );
#endif

    // client thread deschedule
    sched_yield("blocked on parallel rpc_kcm_alloc");

    // restore IRQs
    hal_restore_irq( save_sr);

    // 4. initialize the node_xp[x][y][l] array in DQT barrier descriptor
    //    the node_xp[x][y][0] value is available in rpc.args[1]

#if DEBUG_BARRIER_CREATE
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] initialises array of pointers on dqt_nodes\n",
__FUNCTION__, process->pid, this->trdid );
#endif

    for ( x = 0 ; x < x_size ; x++ )
    {
        for ( y = 0 ; y < y_size ; y++ )
        {
            cxy_t    cxy      = HAL_CXY_FROM_XY( x , y );   // target cluster identifier
            xptr_t   array_xp = (xptr_t)rpc[cxy].args[1];   // extended pointer on node array
            uint32_t offset   = sizeof( dqt_node_t );       // size of a DQT node

            // set values into the node_xp[x][y][l] array
            for ( l = 0 ; l < levels ; l++ )
            {
                xptr_t node_xp = array_xp + (offset * l);
                hal_remote_s64( XPTR( ref_cxy , &barrier->node_xp[x][y][l] ), node_xp );

#if DEBUG_BARRIER_CREATE
if( cycle > DEBUG_BARRIER_CREATE )
printk(" - dqt_node_xp[%d,%d,%d] = (%x,%x) / &dqt_node_xp = %x\n",
x , y , l , GET_CXY( node_xp ), GET_PTR( node_xp ), &barrier->node_xp[x][y][l] );
#endif
            }
        }
    }

    // 5. release memory locally allocated for the RPCs array
    req.type = KMEM_PAGE;
    req.ptr  = rpc_page;
    kmem_free( &req );

#if DEBUG_BARRIER_CREATE
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] released memory for RPC descriptors array\n",
__FUNCTION__, process->pid, this->trdid );
#endif

    // 6. initialise all distributed DQT nodes using remote accesses
    //    and the pointers stored in the node_xp[x][y][l] array
    for ( x = 0 ; x < x_size ; x++ )
    {
        for ( y = 0 ; y < y_size ; y++ )
        {
            // initialize existing clusters only
            if( LOCAL_CLUSTER->cluster_info[x][y] )
            {
                for ( l = 0 ; l < levels ; l++ )
                {
                    xptr_t   parent_xp;
                    xptr_t   child_xp[4];
                    uint32_t arity = 0;

                    // get DQT node pointers
                    xptr_t       node_xp  = hal_remote_l64( XPTR( ref_cxy,
                                            &barrier->node_xp[x][y][l] ) );
                    cxy_t        node_cxy = GET_CXY( node_xp );
                    dqt_node_t * node_ptr = GET_PTR( node_xp );

                    // compute arity and child_xp[i]
                    if (l == 0 )                    // bottom DQT node
                    {
                        arity       = nthreads;

                        child_xp[0] = XPTR_NULL;
                        child_xp[1] = XPTR_NULL;
                        child_xp[2] = XPTR_NULL;
                        child_xp[3] = XPTR_NULL;
                    }
                    else                            // not a bottom DQT node
                    {
                        arity = 0;

                        // only a few non-bottom nodes must be initialised
                        if( ((x & ((1<<l)-1)) == 0) && ((y & ((1<<l)-1)) == 0) )
                        {
                            uint32_t cx[4];       // x coordinate for children
                            uint32_t cy[4];       // y coordinate for children
                            uint32_t i;

                            // the child0 coordinates are equal to the parent coordinates
                            // other children coordinates depend on the level value
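                            // (for instance, at level l == 1 the node of cluster (0,0)
                            //  covers the level-0 nodes of clusters (0,0), (0,1), (1,0)
                            //  and (1,1), as computed below)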
                            cx[0] = x;
                            cy[0] = y;

                            cx[1] = x;
                            cy[1] = y + (1 << (l-1));

                            cx[2] = x + (1 << (l-1));
                            cy[2] = y;

                            cx[3] = x + (1 << (l-1));
                            cy[3] = y + (1 << (l-1));

                            for ( i = 0 ; i < 4 ; i++ )
                            {
                                // child pointer is NULL if outside the mesh
                                if ( (cx[i] < x_size) && (cy[i] < y_size) )
                                {
                                    // get child_xp[i]
                                    child_xp[i] = hal_remote_l64( XPTR( ref_cxy,
                                                  &barrier->node_xp[cx[i]][cy[i]][l-1] ) );

                                    // increment arity
                                    arity++;
                                }
                                else
                                {
                                    child_xp[i] = XPTR_NULL;
                                }
                            }
                        }
                    }

                    // compute parent_xp
                    if( l == (levels - 1) )         // root DQT node
                    {
                        parent_xp = XPTR_NULL;
                    }
                    else                            // not the root
                    {
                        uint32_t px = 0;            // parent X coordinate
                        uint32_t py = 0;            // parent Y coordinate
                        bool_t   found = false;

                        // compute macro_cluster x_min, x_max, y_min, y_max
                        uint32_t x_min = x & ~((1<<(l+1))-1);
                        uint32_t x_max = x_min + (1<<(l+1));
                        uint32_t y_min = y & ~((1<<(l+1))-1);
                        uint32_t y_max = y_min + (1<<(l+1));
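                        // (for instance, the level-0 node of cluster (3,2) belongs to
                        //  the 2x2 macro-cluster [2..3]x[2..3], so its parent is the
                        //  level-1 node of the first active cluster found in that range)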

                        // scan all clusters in macro-cluster[x][y][l] / take first active
                        for( px = x_min ; px < x_max ; px++ )
                        {
                            for( py = y_min ; py < y_max ; py++ )
                            {
                                if( LOCAL_CLUSTER->cluster_info[px][py] ) found = true;
                                if( found ) break;
                            }
                            if( found ) break;
                        }

                        parent_xp = hal_remote_l64( XPTR( ref_cxy ,
                                    &barrier->node_xp[px][py][l+1] ) );
                    }

                    // initializes the DQT node
                    hal_remote_s32( XPTR( node_cxy , &node_ptr->arity )       , arity );
                    hal_remote_s32( XPTR( node_cxy , &node_ptr->current )     , 0 );
                    hal_remote_s32( XPTR( node_cxy , &node_ptr->sense )       , 0 );
                    hal_remote_s32( XPTR( node_cxy , &node_ptr->level )       , l );
                    hal_remote_s64( XPTR( node_cxy , &node_ptr->parent_xp )   , parent_xp );
                    hal_remote_s64( XPTR( node_cxy , &node_ptr->child_xp[0] ) , child_xp[0] );
                    hal_remote_s64( XPTR( node_cxy , &node_ptr->child_xp[1] ) , child_xp[1] );
                    hal_remote_s64( XPTR( node_cxy , &node_ptr->child_xp[2] ) , child_xp[2] );
                    hal_remote_s64( XPTR( node_cxy , &node_ptr->child_xp[3] ) , child_xp[3] );

                    xlist_root_init( XPTR( node_cxy , &node_ptr->root ) );

                    remote_busylock_init( XPTR( node_cxy , &node_ptr->lock ),
                                          LOCK_BARRIER_STATE );
                }
            }
        }
    }

#if DEBUG_BARRIER_CREATE
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_CREATE )
printk("\n[%s] thread[%x,%x] completed DQT barrier initialisation / cycle %d\n",
__FUNCTION__, process->pid, this->trdid, cycle );
dqt_barrier_display( barrier_xp );
#endif

    return barrier;

}  // end dqt_barrier_create()

///////////////////////////////////////////////
void dqt_barrier_destroy( xptr_t   barrier_xp )
{
    page_t     * rpc_page;
    xptr_t       rpc_page_xp;
    rpc_desc_t * rpc;          // local pointer on RPC descriptors array
    xptr_t       rpc_xp;       // extended pointer on RPC descriptor array
    reg_t        save_sr;      // for critical section
    kmem_req_t   req;          // kmem request

    thread_t * this = CURRENT_THREAD;

    // get DQT barrier descriptor cluster and local pointer
    dqt_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
    cxy_t           barrier_cxy = GET_CXY( barrier_xp );

#if DEBUG_BARRIER_DESTROY
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_DESTROY )
printk("\n[%s] thread[%x,%x] enter for barrier (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

    // get x_size and y_size global parameters
    uint32_t x_size = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->x_size ) );
    uint32_t y_size = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->y_size ) );

    // 1. allocate memory from local cluster for an array of 256 RPC descriptors
    //    cannot share the RPC descriptor, because the "buf" argument is not shared
    req.type  = KMEM_PAGE;
    req.size  = 3;            // 8 pages == 32 Kbytes
    req.flags = AF_ZERO;
    rpc_page    = kmem_alloc( &req );
    rpc_page_xp = XPTR( local_cxy , rpc_page );

    // get pointers on RPC descriptors array
    rpc_xp = ppm_page2base( rpc_page_xp );
    rpc    = GET_PTR( rpc_xp );

    // 2. send parallel RPCs to all existing clusters covered by the DQT
    //    to release memory allocated for the arrays of DQT nodes in each cluster

    uint32_t responses = 0;    // initialize RPC responses counter

    // mask IRQs
    hal_disable_irq( &save_sr);

    // client thread blocks itself
    thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_RPC );

    uint32_t x , y;

#if DEBUG_BARRIER_DESTROY
if( cycle > DEBUG_BARRIER_DESTROY )
printk("\n[%s] thread[%x,%x] send RPCs to release the distributed dqt_node array\n",
__FUNCTION__, this->process->pid, this->trdid );
#endif

    for ( x = 0 ; x < x_size ; x++ )
    {
        for ( y = 0 ; y < y_size ; y++ )
        {
            // send RPC to existing cluster only
            if( LOCAL_CLUSTER->cluster_info[x][y] )
            {
                // compute target cluster identifier
                cxy_t cxy = HAL_CXY_FROM_XY( x , y );

                // get local pointer on dqt_nodes array in target cluster
                xptr_t buf_xp_xp = XPTR( barrier_cxy , &barrier_ptr->node_xp[x][y][0] );
                xptr_t buf_xp    = hal_remote_l64( buf_xp_xp );
                void * buf       = GET_PTR( buf_xp );

                assert( (cxy == GET_CXY(buf_xp)) , "bad extended pointer on dqt_nodes array\n" );

                // build a specific RPC descriptor
                rpc[cxy].rsp      = &responses;
                rpc[cxy].blocking = false;
                rpc[cxy].index    = RPC_KCM_FREE;
                rpc[cxy].thread   = this;
                rpc[cxy].lid      = this->core->lid;
                rpc[cxy].args[0]  = (uint64_t)(intptr_t)buf;
                rpc[cxy].args[1]  = (uint64_t)KMEM_512_BYTES;

                // atomically increment expected responses counter
                hal_atomic_add( &responses , 1 );

#if DEBUG_BARRIER_DESTROY
if( cycle > DEBUG_BARRIER_DESTROY )
printk(" - target cluster(%d,%d) / buffer %x\n", x, y, buf );
#endif
                // send a non-blocking RPC to release 512 bytes in target cluster
                rpc_send( cxy , &rpc[cxy] );
            }
        }
    }

    // client thread deschedule
    sched_yield("blocked on parallel rpc_kcm_free");

    // restore IRQs
    hal_restore_irq( save_sr);

    // 3. release memory locally allocated for the RPC descriptors array
    req.type = KMEM_PAGE;
    req.ptr  = rpc_page;
    kmem_free( &req );

    // 4. release memory allocated for barrier descriptor
    xptr_t   page_xp  = ppm_base2page( barrier_xp );
    cxy_t    page_cxy = GET_CXY( page_xp );
    page_t * page_ptr = GET_PTR( page_xp );

    ppm_remote_free_pages( page_cxy , page_ptr );

#if DEBUG_BARRIER_DESTROY
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_DESTROY )
printk("\n[%s] thread[%x,%x] exit for barrier (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

}  // end dqt_barrier_destroy()

////////////////////////////////////////////
void dqt_barrier_wait( xptr_t   barrier_xp )
{
    thread_t * this = CURRENT_THREAD;

    // check calling thread can yield
    thread_assert_can_yield( this , __FUNCTION__ );

    // get cluster and local pointer on DQT barrier descriptor
    dqt_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
    cxy_t           barrier_cxy = GET_CXY( barrier_xp );

#if DEBUG_BARRIER_WAIT
uint32_t cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] enter / barrier (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

    // get extended pointer on local bottom DQT node
    uint32_t x       = HAL_X_FROM_CXY( local_cxy );
    uint32_t y       = HAL_Y_FROM_CXY( local_cxy );
    xptr_t   node_xp = hal_remote_l64( XPTR( barrier_cxy , &barrier_ptr->node_xp[x][y][0] ) );

    // call recursive function to traverse DQT from bottom to root
    dqt_barrier_increment( node_xp );

#if DEBUG_BARRIER_WAIT
cycle = (uint32_t)hal_get_cycles();
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] exit / barrier (%x,%x) / cycle %d\n",
__FUNCTION__, this->process->pid, this->trdid, barrier_cxy, barrier_ptr, cycle );
#endif

}  // end dqt_barrier_wait()

//////////////////////////////////////////////
void dqt_barrier_display( xptr_t  barrier_xp )
{
    // get cluster and local pointer on DQT barrier
    dqt_barrier_t * barrier_ptr = GET_PTR( barrier_xp );
    cxy_t           barrier_cxy = GET_CXY( barrier_xp );

    // get barrier global parameters
    uint32_t x_size   = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->x_size   ) );
    uint32_t y_size   = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->y_size   ) );
    uint32_t nthreads = hal_remote_l32( XPTR( barrier_cxy , &barrier_ptr->nthreads ) );

    // compute size and number of DQT levels
    uint32_t z      = (x_size > y_size) ? x_size : y_size;
    uint32_t levels = (z < 2) ? 1 : (z < 3) ? 2 : (z < 5) ? 3 : (z < 9) ? 4 : 5;

    printk("\n***** DQT barrier : x_size %d / y_size %d / nthreads %d / levels %d *****\n",
    x_size, y_size, nthreads, levels );

    uint32_t x , y , l;

    for ( x = 0 ; x < x_size ; x++ )
    {
        for ( y = 0 ; y < y_size ; y++ )
        {
            printk(" - cluster[%d,%d]\n", x , y );

            for ( l = 0 ; l < levels ; l++ )
            {
                // get pointers on target node
                xptr_t       node_xp  = hal_remote_l64( XPTR( barrier_cxy ,
                                        &barrier_ptr->node_xp[x][y][l] ) );
                dqt_node_t * node_ptr = GET_PTR( node_xp );
                cxy_t        node_cxy = GET_CXY( node_xp );

                if( node_xp != XPTR_NULL )
                {
                    uint32_t level = hal_remote_l32( XPTR( node_cxy , &node_ptr->level       ));
                    uint32_t arity = hal_remote_l32( XPTR( node_cxy , &node_ptr->arity       ));
                    uint32_t count = hal_remote_l32( XPTR( node_cxy , &node_ptr->current     ));
                    xptr_t   pa_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp   ));
                    xptr_t   c0_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->child_xp[0] ));
                    xptr_t   c1_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->child_xp[1] ));
                    xptr_t   c2_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->child_xp[2] ));
                    xptr_t   c3_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->child_xp[3] ));

                    printk("   . level %d : (%x,%x) / %d on %d / P(%x,%x) / C0(%x,%x)"
                           " C1(%x,%x) / C2(%x,%x) / C3(%x,%x)\n",
                    level, node_cxy, node_ptr, count, arity,
                    GET_CXY(pa_xp), GET_PTR(pa_xp),
                    GET_CXY(c0_xp), GET_PTR(c0_xp),
                    GET_CXY(c1_xp), GET_PTR(c1_xp),
                    GET_CXY(c2_xp), GET_PTR(c2_xp),
                    GET_CXY(c3_xp), GET_PTR(c3_xp) );
                }
            }
        }
    }
}  // end dqt_barrier_display()


//////////////////////////////////////////////////////////////////////////////////////////
// This static (recursive) function is called by the dqt_barrier_wait() function.
// It traverses the DQT from bottom to root, and increments the "current" counter of
// each traversed node. For each traversed node, it blocks and deschedules if it is
// not the last expected thread. The last arrived thread resets the local node before
// returning.
//////////////////////////////////////////////////////////////////////////////////////////
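// Worked example (for a 2x2 mesh with nthreads == 2): in each cluster the first
// arriving thread registers on the local level-0 node and deschedules; the second
// (last) one resets that node and recursively calls this function on the level-1
// root node. The first three of these four per-cluster "winners" block on the root;
// the fourth resets it and unblocks them. Each winner, once back from the recursive
// call, unblocks the thread registered on its own level-0 node.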
static void dqt_barrier_increment( xptr_t  node_xp )
{
    uint32_t   expected;
    uint32_t   sense;
    uint32_t   arity;

    thread_t * this = CURRENT_THREAD;

    // get node cluster and local pointer
    dqt_node_t * node_ptr = GET_PTR( node_xp );
    cxy_t        node_cxy = GET_CXY( node_xp );

    // build relevant extended pointers
    xptr_t  arity_xp   = XPTR( node_cxy , &node_ptr->arity );
    xptr_t  sense_xp   = XPTR( node_cxy , &node_ptr->sense );
    xptr_t  current_xp = XPTR( node_cxy , &node_ptr->current );
    xptr_t  lock_xp    = XPTR( node_cxy , &node_ptr->lock );
    xptr_t  root_xp    = XPTR( node_cxy , &node_ptr->root );

#if DEBUG_BARRIER_WAIT
uint32_t   cycle = (uint32_t)hal_get_cycles();
uint32_t   level = hal_remote_l32( XPTR( node_cxy, &node_ptr->level ) );
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] increments DQT node(%d,%d,%d) / cycle %d\n",
__FUNCTION__ , this->process->pid, this->trdid,
HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level, cycle );
#endif

    // get extended pointer on parent node
    xptr_t parent_xp = hal_remote_l64( XPTR( node_cxy , &node_ptr->parent_xp ) );

    // take busylock
    remote_busylock_acquire( lock_xp );

    // get sense and arity values from barrier descriptor
    sense = hal_remote_l32( sense_xp );
    arity = hal_remote_l32( arity_xp );

    // compute expected value
    expected = (sense == 0) ? 1 : 0;

    // increment current number of arrived threads / get value before increment
    uint32_t current = hal_remote_atomic_add( current_xp , 1 );

    // the last arrived thread resets the local node, makes the recursive call
    // on the parent node, and reactivates all waiting threads when returning.
    // other threads block, register in queue, and deschedule.

    if ( current == (arity - 1) )                        // last thread
    {

#if DEBUG_BARRIER_WAIT
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] reset DQT node(%d,%d,%d)\n",
__FUNCTION__ , this->process->pid, this->trdid,
HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
#endif
        // reset the current node
        hal_remote_s32( sense_xp   , expected );
        hal_remote_s32( current_xp , 0 );

        // release busylock protecting the current node
        remote_busylock_release( lock_xp );

        // recursive call on parent node when current node is not the root
        if( parent_xp != XPTR_NULL) dqt_barrier_increment( parent_xp );

        // unblock all waiting threads on this node
        while( xlist_is_empty( root_xp ) == false )
        {
            // get pointers on first waiting thread
            xptr_t     thread_xp  = XLIST_FIRST( root_xp , thread_t , wait_list );
            cxy_t      thread_cxy = GET_CXY( thread_xp );
            thread_t * thread_ptr = GET_PTR( thread_xp );

#if (DEBUG_BARRIER_WAIT & 1)
trdid_t     trdid   = hal_remote_l32( XPTR( thread_cxy , &thread_ptr->trdid ) );
process_t * process = hal_remote_lpt( XPTR( thread_cxy , &thread_ptr->process ) );
pid_t       pid     = hal_remote_l32( XPTR( thread_cxy , &process->pid ) );
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] unblock thread[%x,%x]\n",
__FUNCTION__, this->process->pid, this->trdid, pid, trdid );
#endif
            // remove waiting thread from queue
            xlist_unlink( XPTR( thread_cxy , &thread_ptr->wait_list ) );

            // unblock waiting thread
            thread_unblock( thread_xp , THREAD_BLOCKED_USERSYNC );
        }
    }
    else                                                 // not the last thread
    {
        // get extended pointer on xlist entry from thread
        xptr_t  entry_xp = XPTR( local_cxy , &this->wait_list );

        // register calling thread in barrier waiting queue
        xlist_add_last( root_xp , entry_xp );

        // block calling thread
        thread_block( XPTR( local_cxy , this ) , THREAD_BLOCKED_USERSYNC );

        // release busylock protecting the remote_barrier
        remote_busylock_release( lock_xp );

#if DEBUG_BARRIER_WAIT
if( cycle > DEBUG_BARRIER_WAIT )
printk("\n[%s] thread[%x,%x] blocks on node(%d,%d,%d)\n",
__FUNCTION__ , this->process->pid, this->trdid,
HAL_X_FROM_CXY(node_cxy), HAL_Y_FROM_CXY(node_cxy), level );
#endif
        // deschedule
        sched_yield("blocked on barrier");
    }

    return;

}  // end dqt_barrier_increment()
