/*************************************************************************/
/*                                                                       */
/*  Copyright (c) 1994 Stanford University                               */
/*                                                                       */
/*  All rights reserved.                                                 */
/*                                                                       */
/*  Permission is given to use, copy, and modify this software for any   */
/*  non-commercial purpose as long as this copyright notice is not       */
/*  removed. All other uses, including redistribution in whole or in     */
/*  part, are forbidden without prior written permission.                */
/*                                                                       */
/*  This software is provided with absolutely no warranty and no         */
/*  support.                                                             */
/*                                                                       */
/*************************************************************************/

---|
/*************************************************************************/
/*                                                                       */
/*  SPLASH Ocean Code                                                    */
/*                                                                       */
/*  This application studies the role of eddy and boundary currents in   */
/*  influencing large-scale ocean movements. This implementation uses    */
/*  dynamically allocated four-dimensional arrays for grid data storage. */
/*                                                                       */
/*  Main parameters are :                                                */
/*                                                                       */
/*  - M : simulate an MxM ocean. M must be (a power of 2) + 2.           */
/*  - N : number of threads. N must be a power of 4.                     */
/*  - E : error tolerance for iterative relaxation.                      */
/*  - R : distance between grid points in meters.                        */
/*  - T : timestep in seconds.                                           */
/*                                                                       */
/*************************************************************************/

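// Example (illustration only, using the default parameters below) :
// M = 258 gives a 256 x 256 computational interior ; with N = 16 threads
// the process grid is xprocs = yprocs = 4, and each thread owns a 64 x 64
// sub-grid stored with a one-point border (x_part = y_part = 66).
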
---|
///////////////////////////////////////////////////////////////////////////
// This is the port of the SPLASH Ocean application on the GIET-VM
// operating system, for the TSAR manycore architecture.
// Done by Alain Greiner (March 2016).
///////////////////////////////////////////////////////////////////////////

---|
#define DEFAULT_M   258
#define DEFAULT_E   1e-7
#define DEFAULT_T   28800.0
#define DEFAULT_R   20000.0
#define UP          0
#define DOWN        1
#define LEFT        2
#define RIGHT       3
#define UPLEFT      4
#define UPRIGHT     5
#define DOWNLEFT    6
#define DOWNRIGHT   7
#define PAGE_SIZE   4096
#define MAX_THREADS 1024

---|
#include "decs.h"

#include <user_lock.h>
#include <user_barrier.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

---|
// GIET specific global variables

pthread_t thread_kernel[MAX_THREADS];  // identifier defined by the kernel
long      thread_user[MAX_THREADS];    // user index = x*yprocs + y

user_lock_t tty_lock;

giet_sqt_barrier_t barrier;

sqt_lock_t id_lock;
sqt_lock_t psiai_lock;
sqt_lock_t psibi_lock;
sqt_lock_t done_lock;
sqt_lock_t error_lock;
sqt_lock_t bar_lock;

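// Note : the sqt_* primitives are the GIET-VM hierarchical barrier and
// locks, implemented (as the name suggests) as a quad-tree distributed
// over the clusters of the mesh, to avoid contention on a single memory
// bank when many threads synchronise.
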
---|
// OCEAN global variables

struct multi_struct*    multi;
struct global_struct*   global;
struct Global_Private** gps;       // array of pointers[nprocs]

double **** psi;
double **** psim;
double ***  psium;
double ***  psilm;
double ***  psib;
double ***  ga;
double ***  gb;
double **** work1;
double ***  work2;
double ***  work3;
double **** work4;
double **** work5;
double ***  work6;
double **** work7;
double **** temparray;
double ***  tauz;
double ***  oldga;
double ***  oldgb;

double *    f;
double **** q_multi;
double **** rhs_multi;

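// Indexing convention for the grids allocated in main() : four-star arrays
// are accessed as grid[procid][layer][i][j], with layer = 0 or 1 for the
// two ocean layers, and three-star arrays as grid[procid][i][j].
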
---|
long xprocs;   // number of blocks in a row (one block per proc)
long yprocs;   // number of blocks in a col (one block per proc)
long nprocs;   // total number of blocks (number of procs)

---|
const double h1   = 1000.0;
const double h3   = 4000.0;
const double h    = 5000.0;
const double lf   = -5.12e11;
double       res  = DEFAULT_R;
double       dtau = DEFAULT_T;
const double f0   = 8.3e-5;
const double beta = 2.0e-11;
const double gpsr = 0.02;

long im = DEFAULT_M;   // number of grid points in a row
long jm = DEFAULT_M;   // number of grid points in a column

double       tolerance = DEFAULT_E;
double       eig2;
double       ysca;
long         jmm1;
const double pi      = 3.141592653589793;
const double dt0     = 0.5e-4;
const double outday0 = 1.0;
const double outday1 = 2.0;
const double outday2 = 2.0;
const double outday3 = 2.0;
double       factjacob;
double       factlap;
long         numlev;
long *       imx;       // array[numlev]
long *       jmx;       // array[numlev]
double *     lev_res;   // array[numlev]
double *     lev_tol;   // array[numlev]
const double maxwork = 10000.0;

double * i_int_coeff;
double * j_int_coeff;
long *   xpts_per_proc;
long *   ypts_per_proc;
long     minlevel;

---|
///////////////////////////////////////////
__attribute__ ((constructor)) int main()
///////////////////////////////////////////
{
    long x;            // index to scan xprocs
    long y;            // index to scan yprocs
    long i;            // index to scan numlev
    long j;            // index to scan phases
    long x_part;
    long y_part;
    long d_size;
    long itemp;
    long jtemp;
    long start_time;
    long init_time;

    start_time = giet_proctime();

---|
    // compute number of threads : nprocs
    // as we want one thread per processor, it depends on the
    // hardware architecture x_size / y_size / procs per cluster
    unsigned int mesh_x_size;
    unsigned int mesh_y_size;
    unsigned int procs_per_cluster;

    giet_procs_number(&mesh_x_size, &mesh_y_size, &procs_per_cluster);
    nprocs = mesh_x_size * mesh_y_size * procs_per_cluster;

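    // (illustration : on a 4 x 4 mesh with 4 processors per cluster,
    //  nprocs = 4 * 4 * 4 = 64 threads)
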
---|
    giet_pthread_assert( (procs_per_cluster == 1) || (procs_per_cluster == 4),
                         "[OCEAN ERROR] number of procs per cluster must be 1 or 4");

    giet_pthread_assert( (mesh_x_size == 1) || (mesh_x_size == 2) || (mesh_x_size == 4) ||
                         (mesh_x_size == 8) || (mesh_x_size == 16),
                         "[OCEAN ERROR] mesh_x_size must be 1,2,4,8,16");

    giet_pthread_assert( (mesh_y_size == 1) || (mesh_y_size == 2) || (mesh_y_size == 4) ||
                         (mesh_y_size == 8) || (mesh_y_size == 16),
                         "[OCEAN ERROR] mesh_y_size must be 1,2,4,8,16");

    giet_pthread_assert( (mesh_x_size == mesh_y_size),
                         "[OCEAN ERROR] mesh_x_size and mesh_y_size must be equal");

---|
    // check the ocean size : M*M grid / (M-2) must be a power of 2

    giet_pthread_assert( (im == 34)  || (im == 66)  || (im == 130) ||
                         (im == 258) || (im == 514) || (im == 1026),
                         "[OCEAN ERROR] grid side must be 34,66,130,258,514,1026");

---|
    // initialise distributed heap
    for ( x = 0 ; x < mesh_x_size ; x++ )
    {
        for ( y = 0 ; y < mesh_y_size ; y++ )
        {
            heap_init( x , y );
        }
    }

---|
    // allocate shared TTY & initialise tty_lock
    giet_tty_alloc( 1 );
    lock_init( &tty_lock );

    giet_tty_printf("\n[OCEAN] simulation with W-cycle multigrid solver\n"
                    " Processors : %d x %d x %d\n"
                    " Grid size  : %d x %d\n",
                    mesh_x_size , mesh_y_size , procs_per_cluster, im , jm );

---|
    // initialise distributed barrier
    sqt_barrier_init( &barrier , mesh_x_size , mesh_y_size , procs_per_cluster );

    // initialise distributed locks
    sqt_lock_init( &id_lock    , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &psiai_lock , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &psibi_lock , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &done_lock  , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &error_lock , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &bar_lock   , mesh_x_size , mesh_y_size , procs_per_cluster );

---|
    // allocate thread_kernel[] array : thread identifiers defined by the kernel
    // (note : these local arrays shadow the fixed-size global arrays above)
    pthread_t* thread_kernel = malloc( nprocs * sizeof(pthread_t) );

    // allocate thread_user[] array : continuous thread index defined by the user
    long* thread_user = malloc( nprocs * sizeof(long) );

---|
    // compute number of blocks per row and per column : nprocs = xprocs * yprocs
    if (procs_per_cluster == 1)
    {
        xprocs = mesh_x_size;
        yprocs = mesh_y_size;
    }
    else
    {
        xprocs = mesh_x_size * 2;
        yprocs = mesh_y_size * 2;
    }

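    // (illustration : with 4 processors per cluster, each cluster holds a
    //  2 x 2 sub-array of processes, so the process grid is twice the mesh
    //  in each dimension ; the cluster of process (x,y) is then (x>>1,y>>1),
    //  as used by the remote_malloc() calls below)
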
---|
    // compute numlev
    itemp    = 1;
    jtemp    = 1;
    numlev   = 0;
    minlevel = 0;
    while (itemp < (im - 2))
    {
        itemp = itemp * 2;
        jtemp = jtemp * 2;
        if ((itemp / yprocs > 1) && (jtemp / xprocs > 1))
        {
            numlev++;
        }
    }

    giet_pthread_assert( (numlev > 0),
                         "[OCEAN ERROR] at least 2*2 grid points per processor");

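    // (worked example : for im = 258 and xprocs = yprocs = 4, itemp takes
    //  the values 2,4,8,...,256 ; the values 8,16,32,64,128,256 satisfy the
    //  test above, so numlev = 6 : a level is counted only while each
    //  process keeps more than one grid point per dimension)
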
---|
    // allocate in cluster(0,0) arrays indexed by numlev
    imx           = (long *)   malloc(numlev * sizeof(long));
    jmx           = (long *)   malloc(numlev * sizeof(long));
    lev_res       = (double *) malloc(numlev * sizeof(double));
    lev_tol       = (double *) malloc(numlev * sizeof(double));
    i_int_coeff   = (double *) malloc(numlev * sizeof(double));
    j_int_coeff   = (double *) malloc(numlev * sizeof(double));
    xpts_per_proc = (long *)   malloc(numlev * sizeof(long));
    ypts_per_proc = (long *)   malloc(numlev * sizeof(long));

---|
    // initialise these arrays
    imx[numlev - 1]     = im;
    jmx[numlev - 1]     = jm;
    lev_res[numlev - 1] = res;
    lev_tol[numlev - 1] = tolerance;

    for (i = numlev - 2; i >= 0; i--)
    {
        imx[i]     = ((imx[i + 1] - 2) / 2) + 2;
        jmx[i]     = ((jmx[i + 1] - 2) / 2) + 2;
        lev_res[i] = lev_res[i + 1] * 2;
    }

    for (i = 0; i < numlev; i++)
    {
        xpts_per_proc[i] = (jmx[i] - 2) / xprocs;
        ypts_per_proc[i] = (imx[i] - 2) / yprocs;
    }
    for (i = numlev - 1; i >= 0; i--)
    {
        if ((xpts_per_proc[i] < 2) || (ypts_per_proc[i] < 2))
        {
            minlevel = i + 1;
            break;
        }
    }

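    // (worked example : the coarsening rule imx[i] = (imx[i+1]-2)/2 + 2
    //  gives, for im = 258 and numlev = 6, the chain 258,130,66,34,18,10
    //  from finest to coarsest level, while lev_res doubles at each step ;
    //  minlevel marks the coarsest level still holding a 2 x 2 block per
    //  process)
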
---|
    // allocate in cluster(0,0) arrays of pointers ****
    d_size    = nprocs * sizeof(double ***);
    psi       = (double ****)malloc(d_size);
    psim      = (double ****)malloc(d_size);
    work1     = (double ****)malloc(d_size);
    work4     = (double ****)malloc(d_size);
    work5     = (double ****)malloc(d_size);
    work7     = (double ****)malloc(d_size);
    temparray = (double ****)malloc(d_size);

---|
    // allocate in each cluster(x,y) arrays of pointers ***
    d_size = 2 * sizeof(double **);
    for (x = 0; x < xprocs; x++)
    {
        for (y = 0; y < yprocs; y++)
        {
            long procid = y * xprocs + x;
            long cx = (procs_per_cluster == 1) ? x : x >> 1;
            long cy = (procs_per_cluster == 1) ? y : y >> 1;

            psi[procid]       = (double ***)remote_malloc(d_size , cx , cy);
            psim[procid]      = (double ***)remote_malloc(d_size , cx , cy);
            work1[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            work4[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            work5[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            work7[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            temparray[procid] = (double ***)remote_malloc(d_size , cx , cy);
        }
    }

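    // (remote_malloc( size , cx , cy ) places the data in the physical
    //  memory of cluster (cx,cy), so that each thread finds its own block
    //  in its local cluster ; the two double** entries per process are the
    //  two ocean layers, filled in further below)
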
---|
    // allocate in cluster(0,0) arrays of pointers ***
    d_size = nprocs * sizeof(double **);
    psium = (double ***)malloc(d_size);
    psilm = (double ***)malloc(d_size);
    psib  = (double ***)malloc(d_size);
    ga    = (double ***)malloc(d_size);
    gb    = (double ***)malloc(d_size);
    work2 = (double ***)malloc(d_size);
    work3 = (double ***)malloc(d_size);
    work6 = (double ***)malloc(d_size);
    tauz  = (double ***)malloc(d_size);
    oldga = (double ***)malloc(d_size);
    oldgb = (double ***)malloc(d_size);

    // allocate in cluster(0,0) array of pointers gps[nprocs]
    gps = (struct Global_Private**)malloc((nprocs+1)*sizeof(struct Global_Private*));

---|
    // allocate in each cluster(x,y) the gps[procid] structures
    for (x = 0; x < xprocs; x++)
    {
        for (y = 0; y < yprocs; y++)
        {
            long procid = y * xprocs + x;
            long cx = (procs_per_cluster == 1) ? x : x >> 1;
            long cy = (procs_per_cluster == 1) ? y : y >> 1;

            gps[procid] = (struct Global_Private*)remote_malloc(
                              sizeof(struct Global_Private) , cx , cy);

            gps[procid]->rel_num_x = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rel_num_y = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->eist      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->ejst      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->oist      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->ojst      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rlist     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rljst     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rlien     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rljen     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->multi_time = 0;
            gps[procid]->total_time = 0;
            gps[procid]->sync_time  = 0;
            for (j = 0; j < 10; j++) gps[procid]->steps_time[j] = 0;
        }
    }

---|
    ////////////
    subblock();

    x_part = (jm - 2) / xprocs + 2;   // number of grid points in a block row
    y_part = (im - 2) / yprocs + 2;   // number of grid points in a block column

    d_size = x_part * y_part * sizeof(double) + y_part * sizeof(double *);

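    // (each per-process 2D grid is carved from a single allocation of
    //  d_size bytes : y_part row pointers followed by y_part * x_part
    //  doubles ; the row pointers are presumably wired by link_all() below)
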
---|
    global = (struct global_struct *)malloc(sizeof(struct global_struct));

---|
    // allocate in each cluster(x,y) the arrays of pointers **
    for (x = 0; x < xprocs; x++)
    {
        for (y = 0; y < yprocs; y++)
        {
            long procid = y * xprocs + x;
            long cx = (procs_per_cluster == 1) ? x : x >> 1;
            long cy = (procs_per_cluster == 1) ? y : y >> 1;

            psi[procid][0]       = (double **)remote_malloc(d_size , cx , cy);
            psi[procid][1]       = (double **)remote_malloc(d_size , cx , cy);
            psim[procid][0]      = (double **)remote_malloc(d_size , cx , cy);
            psim[procid][1]      = (double **)remote_malloc(d_size , cx , cy);
            psium[procid]        = (double **)remote_malloc(d_size , cx , cy);
            psilm[procid]        = (double **)remote_malloc(d_size , cx , cy);
            psib[procid]         = (double **)remote_malloc(d_size , cx , cy);
            ga[procid]           = (double **)remote_malloc(d_size , cx , cy);
            gb[procid]           = (double **)remote_malloc(d_size , cx , cy);
            work1[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work1[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            work2[procid]        = (double **)remote_malloc(d_size , cx , cy);
            work3[procid]        = (double **)remote_malloc(d_size , cx , cy);
            work4[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work4[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            work5[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work5[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            work6[procid]        = (double **)remote_malloc(d_size , cx , cy);
            work7[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work7[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            temparray[procid][0] = (double **)remote_malloc(d_size , cx , cy);
            temparray[procid][1] = (double **)remote_malloc(d_size , cx , cy);
            tauz[procid]         = (double **)remote_malloc(d_size , cx , cy);
            oldga[procid]        = (double **)remote_malloc(d_size , cx , cy);
            oldgb[procid]        = (double **)remote_malloc(d_size , cx , cy);
        }
    }

---|
    f = (double *)malloc(im * sizeof(double));

    multi = (struct multi_struct *)malloc(sizeof(struct multi_struct));

---|
    // allocate memory for q_multi and rhs_multi
    d_size = numlev * sizeof(double **);
    if (numlev % 2 == 1)   // add an extra pointer for double word alignment
    {
        d_size += sizeof(double **);
    }

    for (i = 0; i < numlev; i++)
    {
        d_size += ((imx[i] - 2) / yprocs + 2) * ((jmx[i] - 2) / xprocs + 2) * sizeof(double) +
                  ((imx[i] - 2) / yprocs + 2) * sizeof(double *);
    }
    d_size *= nprocs;
    if (nprocs % 2 == 1)   // add an extra pointer for double word alignment
    {
        d_size += sizeof(double ***);
    }

    d_size += nprocs * sizeof(double ***);
    q_multi   = (double ****)malloc( d_size );
    rhs_multi = (double ****)malloc( d_size );

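    // (each multigrid array is one flat allocation holding, per process and
    //  per level, a row-pointer array followed by that level's grid points,
    //  with padding pointers keeping everything double-word aligned ;
    //  link_all() presumably sets up the internal pointers)
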
---|
    //////////
    link_all();

---|
    multi->err_multi = 0.0;
    i_int_coeff[0]   = 0.0;
    j_int_coeff[0]   = 0.0;

    for (i = 0; i < numlev; i++)
    {
        i_int_coeff[i] = 1.0 / (imx[i] - 1);
        j_int_coeff[i] = 1.0 / (jmx[i] - 1);
    }

---|
    global->psibi = 0.0;

    factjacob = -1. / (12. * res * res);
    factlap   = 1. / (res * res);
    eig2      = -h * f0 * f0 / (h1 * h3 * gpsr);

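    // (physical reading of these constants : factjacob is the 1/(12*res^2)
    //  factor of the Arakawa Jacobian, factlap the 1/res^2 factor of the
    //  5-point Laplacian stencil, and eig2 appears to be minus the squared
    //  inverse baroclinic Rossby radius of the two-layer model)
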
---|
    jmm1 = jm - 1;
    ysca = ((double) jmm1) * res;

    // from now on, im and jm hold the per-process block dimensions
    im = (imx[numlev-1] - 2) / yprocs + 2;
    jm = (jmx[numlev-1] - 2) / xprocs + 2;

    init_time = giet_proctime() - start_time;

    giet_tty_printf("\n[OCEAN] initialisation completed / start parallel execution\n");

---|
    ///////////////////////////////////////////////////
    //  launch (N-1) other threads to execute slave()
    ///////////////////////////////////////////////////

    for (i = 1 ; i < nprocs ; i++)
    {
        thread_user[i] = i;
        if (giet_pthread_create( &thread_kernel[i],
                                 NULL,
                                 &slave,
                                 &thread_user[i] ))
        {
            giet_pthread_exit("[OCEAN ERROR] in giet_pthread_create()\n");
        }
    }

---|
    // main itself executes slave()
    thread_user[0] = 0;
    slave( &thread_user[0] );

    // wait for the other threads to complete
    for ( i = 1 ; i < nprocs ; i++ )
    {
        if ( giet_pthread_join( thread_kernel[i], NULL ) )
        {
            giet_pthread_exit( "[OCEAN ERROR] in giet_pthread_join()\n" );
        }
    }

---|
    ///////////////////////////////////////////////
    //  instrumentation (display & save on disk)
    ///////////////////////////////////////////////

    char string[256];

    snprintf( string , 256 , "/home/ocean_%d_%d_%d" ,
              mesh_x_size , mesh_y_size , procs_per_cluster );

---|
    // open instrumentation file
    int fd = giet_fat_open( string , O_CREAT );
    if ( fd < 0 )
    {
        giet_tty_printf("\n[OCEAN ERROR] cannot open instrumentation file %s\n", string );
        giet_pthread_exit( NULL );
    }

---|
    snprintf( string , 256 , "\n--- OCEAN : (%dx%dx%d) procs on (%dx%d) grid ---\n",
              mesh_x_size, mesh_y_size, procs_per_cluster ,
              DEFAULT_M , DEFAULT_M );

    giet_tty_printf( "%s" , string );
    giet_fat_fprintf( fd , "%s" , string );

---|
    // compute instrumentation results
    long min_total = gps[0]->total_time;
    long max_total = gps[0]->total_time;
    long min_multi = gps[0]->multi_time;
    long max_multi = gps[0]->multi_time;
    long min_sync  = gps[0]->sync_time;
    long max_sync  = gps[0]->sync_time;

    for (i = 1 ; i < nprocs ; i++)
    {
        if (gps[i]->total_time > max_total) max_total = (gps[i]->total_time);
        if (gps[i]->total_time < min_total) min_total = (gps[i]->total_time);
        if (gps[i]->multi_time > max_multi) max_multi = (gps[i]->multi_time);
        if (gps[i]->multi_time < min_multi) min_multi = (gps[i]->multi_time);
        if (gps[i]->sync_time  > max_sync ) max_sync  = (gps[i]->sync_time );
        if (gps[i]->sync_time  < min_sync ) min_sync  = (gps[i]->sync_time );
    }

---|
    snprintf( string , 256 , "\n   Init Time  Total Time  Multi Time  Sync Time\n"
                             "MIN : %d | %d | %d | %d  (cycles)\n"
                             "MAX : %d | %d | %d | %d  (cycles)\n",
              (int)init_time, (int)min_total, (int)min_multi, (int)min_sync,
              (int)init_time, (int)max_total, (int)max_multi, (int)max_sync );

    giet_tty_printf("%s" , string );
    giet_fat_fprintf( fd , "%s" , string );

---|
    for (i = 0; i < 10; i++)
    {
        long phase_time = 0;
        for (j = 0; j < nprocs; j++)
        {
            phase_time += gps[j]->steps_time[i];
        }
        snprintf( string , 256 , " - Phase %d : %d cycles\n",
                  (int)i , (int)(phase_time / nprocs) );
        giet_tty_printf("%s" , string );
        giet_fat_fprintf( fd , "%s" , string );
    }

---|
    // close instrumentation file and exit
    giet_fat_close( fd );

    giet_pthread_exit("main completed");

    return 0;

} // end main()


---|
// Local Variables:
// tab-width: 4
// c-basic-offset: 4
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:

// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4
---|