[799] | 1 | /*************************************************************************/ |
---|
| 2 | /* */ |
---|
| 3 | /* Copyright (c) 1994 Stanford University */ |
---|
| 4 | /* */ |
---|
| 5 | /* All rights reserved. */ |
---|
| 6 | /* */ |
---|
| 7 | /* Permission is given to use, copy, and modify this software for any */ |
---|
| 8 | /* non-commercial purpose as long as this copyright notice is not */ |
---|
| 9 | /* removed. All other uses, including redistribution in whole or in */ |
---|
| 10 | /* part, are forbidden without prior written permission. */ |
---|
| 11 | /* */ |
---|
| 12 | /* This software is provided with absolutely no warranty and no */ |
---|
| 13 | /* support. */ |
---|
| 14 | /* */ |
---|
| 15 | /*************************************************************************/ |
---|
| 16 | |
---|
| 17 | /////////////////////////////////////////////////////////////////////////// |
---|
[813] | 18 | // This is the port of the SPLASH OCEAN application on the GIET-VM |
---|
| 19 | // operating system, for the TSAR manycores architecture. |
---|
// Done by Alain Greiner (March 2016).
---|
| 21 | // |
---|
| 22 | // This application studies the role of eddy and boundary currents in |
---|
| 23 | // influencing large-scale ocean movements. This implementation uses |
---|
| 24 | // dynamically allocated four-dimensional arrays for grid data storage, |
---|
| 25 | // distributed in all clusters (one square sub-grid per thread). |
---|
| 26 | // The two main parameters are : |
---|
| 27 | // - M : MxM define the grid size. M must be (power of 2) +2. |
---|
| 28 | // - N : N = number of threads. N must be power of 4. |
---|
| 29 | // Other parameters are : |
---|
| 30 | // - E : E = error tolerance for iterative relaxation. |
---|
| 31 | // - R : R = distance between grid points in meters. |
---|
| 32 | // - T : T = timestep in seconds. |
---|
[799] | 33 | /////////////////////////////////////////////////////////////////////////// |
---|
| 34 | |
---|
// default parameters
#define DEFAULT_M   258        // grid side : M*M grid points, M = (power of 2) + 2
#define DEFAULT_E   1e-7       // error tolerance for iterative relaxation
#define DEFAULT_T   28800.0    // timestep in seconds
#define DEFAULT_R   20000.0    // distance between grid points in meters

// constants : neighbour direction indexes, and max number of threads
#define UP          0
#define DOWN        1
#define LEFT        2
#define RIGHT       3
#define UPLEFT      4
#define UPRIGHT     5
#define DOWNLEFT    6
#define DOWNRIGHT   7
#define MAX_THREADS 1024       // size of the statically allocated thread arrays

#include "decs.h"

#include <user_lock.h>
#include <user_barrier.h>
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

// GIET specific global variables

pthread_t thread_kernel[MAX_THREADS];   // thread identifiers defined by the kernel
long      thread_user[MAX_THREADS];     // continuous user index = x*yprocs + y

user_lock_t tty_lock;                   // exclusive access to the shared TTY

giet_sqt_barrier_t barrier;             // global synchronisation barrier

// distributed (SQT) locks shared by all threads
sqt_lock_t id_lock;
sqt_lock_t psiai_lock;
sqt_lock_t psibi_lock;
sqt_lock_t done_lock;
sqt_lock_t error_lock;
sqt_lock_t bar_lock;

// OCEAN global variables

struct multi_struct*    multi;          // shared multigrid state (holds err_multi)
struct global_struct*   global;         // shared state (holds psibi)
struct Global_Private** gps;            // array of pointers[nprocs]

// grid data arrays, distributed in all clusters (one sub-grid per thread) :
// ****  arrays are indexed [procid][layer(0/1)][i][j]
// ***   arrays are indexed [procid][i][j]
double **** psi;
double **** psim;
double ***  psium;
double ***  psilm;
double ***  psib;
double ***  ga;
double ***  gb;
double **** work1;
double ***  work2;
double ***  work3;
double **** work4;
double **** work5;
double ***  work6;
double **** work7;
double **** temparray;
double ***  tauz;
double ***  oldga;
double ***  oldgb;

double *    f;                  // array[im]
double **** q_multi;            // multigrid work arrays (see link_all())
double **** rhs_multi;

long xprocs;                    // number of blocks in a row (one block per proc)
long yprocs;                    // number of blocks in a col (one block per proc)
long nprocs;                    // total number of blocks (number of procs)

// physical constants of the model
const double h1   = 1000.0;
const double h3   = 4000.0;
const double h    = 5000.0;
const double lf   = -5.12e11;
double res  = DEFAULT_R;        // distance between grid points in meters
double dtau = DEFAULT_T;        // timestep in seconds
const double f0   = 8.3e-5;
const double beta = 2.0e-11;
const double gpsr = 0.02;

long im = DEFAULT_M;            // number of grid points in a row
long jm = DEFAULT_M;            // number of grid points in a column

double tolerance = DEFAULT_E;   // error tolerance for iterative relaxation
double eig2;                    // set in main() : -h*f0*f0 / (h1*h3*gpsr)
double ysca;                    // set in main() : (jm-1) * res
long   jmm1;                    // set in main() : jm - 1
const double pi  = 3.141592653589793;
const double dt0 = 0.5e-4;
const double outday0 = 1.0;
const double outday1 = 2.0;
const double outday2 = 2.0;
const double outday3 = 2.0;
double factjacob;               // set in main() : -1 / (12*res*res)
double factlap;                 // set in main() : 1 / (res*res)
long   numlev;                  // number of multigrid levels
long *   imx;                   // array[numlev] : rows per level
long *   jmx;                   // array[numlev] : columns per level
double * lev_res;               // array[numlev] : grid resolution per level
double * lev_tol;               // array[numlev] : error tolerance per level
const double maxwork = 10000.0;

double * i_int_coeff;           // array[numlev] : 1 / (imx[i]-1)
double * j_int_coeff;           // array[numlev] : 1 / (jmx[i]-1)
long *   xpts_per_proc;         // array[numlev] : (jmx[i]-2) / xprocs
long *   ypts_per_proc;         // array[numlev] : (imx[i]-2) / yprocs
long     minlevel;              // coarsest usable multigrid level
---|
| 146 | |
---|
| 147 | /////////////////////////////////////////// |
---|
| 148 | __attribute__ ((constructor)) int main() |
---|
| 149 | /////////////////////////////////////////// |
---|
| 150 | { |
---|
| 151 | long x; // index to scan xprocs |
---|
| 152 | long y; // index to scan yprocs |
---|
| 153 | long i; // index to scan numlev |
---|
| 154 | long j; // index to scan phases |
---|
| 155 | long x_part; |
---|
| 156 | long y_part; |
---|
| 157 | long d_size; |
---|
| 158 | long itemp; |
---|
| 159 | long jtemp; |
---|
| 160 | long start_time; |
---|
| 161 | long init_time; |
---|
| 162 | |
---|
| 163 | start_time = giet_proctime(); |
---|
| 164 | |
---|
[813] | 165 | // allocate shared TTY & initialise tty_lock |
---|
| 166 | giet_tty_alloc( 1 ); |
---|
| 167 | lock_init( &tty_lock); |
---|
| 168 | |
---|
[799] | 169 | // compute number of threads : nprocs |
---|
| 170 | // as we want one thread per processor, it depends on the |
---|
| 171 | // hardware architecture x_size / y_size / procs per cluster |
---|
| 172 | unsigned int mesh_x_size; |
---|
| 173 | unsigned int mesh_y_size; |
---|
| 174 | unsigned int procs_per_cluster; |
---|
| 175 | |
---|
| 176 | giet_procs_number(&mesh_x_size, &mesh_y_size, &procs_per_cluster); |
---|
| 177 | nprocs = mesh_x_size * mesh_y_size * procs_per_cluster; |
---|
| 178 | |
---|
| 179 | giet_pthread_assert( (procs_per_cluster == 1) || (procs_per_cluster == 4), |
---|
| 180 | "[OCEAN ERROR] number of procs per cluster must be 1 or 4"); |
---|
| 181 | |
---|
| 182 | giet_pthread_assert( (mesh_x_size == 1) || (mesh_x_size == 2) || (mesh_x_size == 4) || |
---|
| 183 | (mesh_x_size == 8) || (mesh_y_size == 16), |
---|
| 184 | "[OCEAN ERROR] mesh_x_size must be 1,2,4,8,16"); |
---|
| 185 | |
---|
| 186 | giet_pthread_assert( (mesh_y_size == 1) || (mesh_y_size == 2) || (mesh_y_size == 4) || |
---|
| 187 | (mesh_y_size == 8) || (mesh_y_size == 16), |
---|
| 188 | "[OCEAN ERROR] mesh_y_size must be 1,2,4,8,16"); |
---|
| 189 | |
---|
| 190 | giet_pthread_assert( (mesh_y_size == mesh_y_size ), |
---|
| 191 | "[OCEAN ERROR] mesh_y_size and mesh_y_size must be equal"); |
---|
| 192 | |
---|
| 193 | giet_pthread_assert( (im == 34) || (im == 66) || (im == 130) || |
---|
| 194 | (im == 258) || (im == 514) || (im == 1026), |
---|
| 195 | "[OCEAN ERROR] grid side must be 34,66,130,258,514,1026"); |
---|
[813] | 196 | |
---|
| 197 | giet_tty_printf("\n[OCEAN] simulation with W-cycle multigrid solver\n" |
---|
| 198 | " Processors : %d x %d x %d\n" |
---|
| 199 | " Grid size : %d x %d\n", |
---|
| 200 | mesh_x_size , mesh_y_size , procs_per_cluster, im , jm ); |
---|
| 201 | |
---|
[799] | 202 | // initialise distributed heap |
---|
| 203 | for ( x = 0 ; x < mesh_x_size ; x++ ) |
---|
| 204 | { |
---|
| 205 | for ( y = 0 ; y < mesh_y_size ; y++ ) |
---|
| 206 | { |
---|
| 207 | heap_init( x , y ); |
---|
| 208 | } |
---|
| 209 | } |
---|
| 210 | |
---|
[813] | 211 | // initialise distributed barrier |
---|
[799] | 212 | sqt_barrier_init( &barrier , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 213 | |
---|
| 214 | // initialize distributed locks |
---|
| 215 | sqt_lock_init( &id_lock , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 216 | sqt_lock_init( &psiai_lock , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 217 | sqt_lock_init( &psibi_lock , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 218 | sqt_lock_init( &done_lock , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 219 | sqt_lock_init( &error_lock , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 220 | sqt_lock_init( &bar_lock , mesh_x_size , mesh_y_size , procs_per_cluster ); |
---|
| 221 | |
---|
| 222 | // allocate thread_kernel[] array : thread identidiers defined by the kernel |
---|
| 223 | pthread_t* thread_kernel = malloc( nprocs * sizeof(pthread_t) ); |
---|
| 224 | |
---|
| 225 | // allocate thread_user[] array : continuous thread index defined by the user |
---|
| 226 | long* thread_user = malloc( nprocs * sizeof(unsigned int) ); |
---|
| 227 | |
---|
| 228 | // compute number of blocks per row and per column: nprocs = xprocs * yprocs |
---|
| 229 | if (procs_per_cluster == 1) |
---|
| 230 | { |
---|
| 231 | xprocs = mesh_x_size; |
---|
| 232 | yprocs = mesh_y_size; |
---|
| 233 | } |
---|
| 234 | else |
---|
| 235 | { |
---|
| 236 | xprocs = mesh_x_size*2; |
---|
| 237 | yprocs = mesh_y_size*2; |
---|
| 238 | } |
---|
| 239 | |
---|
| 240 | // compute numlev |
---|
| 241 | minlevel = 0; |
---|
| 242 | itemp = 1; |
---|
| 243 | jtemp = 1; |
---|
| 244 | numlev = 0; |
---|
| 245 | minlevel = 0; |
---|
| 246 | while (itemp < (im - 2)) |
---|
| 247 | { |
---|
| 248 | itemp = itemp * 2; |
---|
| 249 | jtemp = jtemp * 2; |
---|
| 250 | if ((itemp / yprocs > 1) && (jtemp / xprocs > 1)) |
---|
| 251 | { |
---|
| 252 | numlev++; |
---|
| 253 | } |
---|
| 254 | } |
---|
| 255 | |
---|
| 256 | giet_pthread_assert( (numlev > 0), |
---|
| 257 | "[OCEAN ERROR] at least 2*2 grid points per processor"); |
---|
| 258 | |
---|
| 259 | // allocate in cluster(0,0) arrays indexed by numlev |
---|
| 260 | imx = (long *) malloc(numlev * sizeof(long)); |
---|
| 261 | jmx = (long *) malloc(numlev * sizeof(long)); |
---|
| 262 | lev_res = (double *)malloc(numlev * sizeof(double)); |
---|
| 263 | lev_tol = (double *)malloc(numlev * sizeof(double)); |
---|
| 264 | i_int_coeff = (double *)malloc(numlev * sizeof(double)); |
---|
| 265 | j_int_coeff = (double *)malloc(numlev * sizeof(double)); |
---|
| 266 | xpts_per_proc = (long *) malloc(numlev * sizeof(long)); |
---|
| 267 | ypts_per_proc = (long *) malloc(numlev * sizeof(long)); |
---|
| 268 | |
---|
| 269 | // initialize these arrays |
---|
| 270 | imx[numlev - 1] = im; |
---|
| 271 | jmx[numlev - 1] = jm; |
---|
| 272 | lev_res[numlev - 1] = res; |
---|
| 273 | lev_tol[numlev - 1] = tolerance; |
---|
| 274 | |
---|
| 275 | for (i = numlev - 2; i >= 0; i--) |
---|
| 276 | { |
---|
| 277 | imx[i] = ((imx[i + 1] - 2) / 2) + 2; |
---|
| 278 | jmx[i] = ((jmx[i + 1] - 2) / 2) + 2; |
---|
| 279 | lev_res[i] = lev_res[i + 1] * 2; |
---|
| 280 | } |
---|
| 281 | |
---|
| 282 | for (i = 0; i < numlev; i++) |
---|
| 283 | { |
---|
| 284 | xpts_per_proc[i] = (jmx[i] - 2) / xprocs; |
---|
| 285 | ypts_per_proc[i] = (imx[i] - 2) / yprocs; |
---|
| 286 | } |
---|
| 287 | for (i = numlev - 1; i >= 0; i--) |
---|
| 288 | { |
---|
| 289 | if ((xpts_per_proc[i] < 2) || (ypts_per_proc[i] < 2)) |
---|
| 290 | { |
---|
| 291 | minlevel = i + 1; |
---|
| 292 | break; |
---|
| 293 | } |
---|
| 294 | } |
---|
| 295 | |
---|
| 296 | // allocate in cluster(0,0) arrays of pointers **** |
---|
| 297 | d_size = nprocs * sizeof(double ***); |
---|
| 298 | psi = (double ****)malloc(d_size); |
---|
| 299 | psim = (double ****)malloc(d_size); |
---|
| 300 | work1 = (double ****)malloc(d_size); |
---|
| 301 | work4 = (double ****)malloc(d_size); |
---|
| 302 | work5 = (double ****)malloc(d_size); |
---|
| 303 | work7 = (double ****)malloc(d_size); |
---|
| 304 | temparray = (double ****)malloc(d_size); |
---|
| 305 | |
---|
| 306 | // allocate in each cluster(x,y) arrays of pointers **** |
---|
| 307 | d_size = 2 * sizeof(double **); |
---|
| 308 | for (x = 0; x < xprocs; x++) |
---|
| 309 | { |
---|
| 310 | for (y = 0; y < yprocs; y++) |
---|
| 311 | { |
---|
| 312 | long procid = y * xprocs + x; |
---|
| 313 | long cx = (procs_per_cluster == 1) ? x : x>>1; |
---|
| 314 | long cy = (procs_per_cluster == 1) ? y : y>>1; |
---|
| 315 | |
---|
| 316 | psi[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 317 | psim[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 318 | work1[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 319 | work4[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 320 | work5[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 321 | work7[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 322 | temparray[procid] = (double ***)remote_malloc(d_size , cx , cy); |
---|
| 323 | } |
---|
| 324 | } |
---|
| 325 | |
---|
| 326 | // allocate in cluster(0,0) arrays of pointers *** |
---|
| 327 | d_size = nprocs * sizeof(double **); |
---|
| 328 | psium = (double ***)malloc(d_size); |
---|
| 329 | psilm = (double ***)malloc(d_size); |
---|
| 330 | psib = (double ***)malloc(d_size); |
---|
| 331 | ga = (double ***)malloc(d_size); |
---|
| 332 | gb = (double ***)malloc(d_size); |
---|
| 333 | work2 = (double ***)malloc(d_size); |
---|
| 334 | work3 = (double ***)malloc(d_size); |
---|
| 335 | work6 = (double ***)malloc(d_size); |
---|
| 336 | tauz = (double ***)malloc(d_size); |
---|
| 337 | oldga = (double ***)malloc(d_size); |
---|
| 338 | oldgb = (double ***)malloc(d_size); |
---|
| 339 | |
---|
| 340 | // allocate in cluster(0,0) array of pointers gps[nprocs] |
---|
| 341 | gps = (struct Global_Private**)malloc((nprocs+1)*sizeof(struct Global_Private*)); |
---|
| 342 | |
---|
| 343 | // allocate in each cluster(x,y) the gps[procid] structures |
---|
| 344 | for (x = 0; x < xprocs; x++) |
---|
| 345 | { |
---|
| 346 | for (y = 0; y < yprocs; y++) |
---|
| 347 | { |
---|
| 348 | long procid = y * xprocs + x; |
---|
| 349 | long cx = (procs_per_cluster == 1) ? x : x>>1; |
---|
| 350 | long cy = (procs_per_cluster == 1) ? y : y>>1; |
---|
| 351 | |
---|
| 352 | gps[procid] = (struct Global_Private*)remote_malloc( |
---|
| 353 | sizeof(struct Global_Private) , cx , cy); |
---|
| 354 | |
---|
| 355 | gps[procid]->rel_num_x = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 356 | gps[procid]->rel_num_y = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 357 | gps[procid]->eist = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 358 | gps[procid]->ejst = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 359 | gps[procid]->oist = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 360 | gps[procid]->ojst = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 361 | gps[procid]->rlist = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 362 | gps[procid]->rljst = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 363 | gps[procid]->rlien = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 364 | gps[procid]->rljen = (long *)remote_malloc(numlev * sizeof(long) , cx , cy); |
---|
| 365 | gps[procid]->multi_time = 0; |
---|
| 366 | gps[procid]->total_time = 0; |
---|
| 367 | gps[procid]->sync_time = 0; |
---|
| 368 | gps[procid]->steps_time[0] = 0; |
---|
| 369 | gps[procid]->steps_time[1] = 0; |
---|
| 370 | gps[procid]->steps_time[2] = 0; |
---|
| 371 | gps[procid]->steps_time[3] = 0; |
---|
| 372 | gps[procid]->steps_time[4] = 0; |
---|
| 373 | gps[procid]->steps_time[5] = 0; |
---|
| 374 | gps[procid]->steps_time[6] = 0; |
---|
| 375 | gps[procid]->steps_time[7] = 0; |
---|
| 376 | gps[procid]->steps_time[8] = 0; |
---|
| 377 | gps[procid]->steps_time[9] = 0; |
---|
| 378 | } |
---|
| 379 | } |
---|
| 380 | |
---|
| 381 | //////////// |
---|
| 382 | subblock(); |
---|
| 383 | |
---|
| 384 | x_part = (jm - 2) / xprocs + 2; // nunber of grid points in block row |
---|
| 385 | y_part = (im - 2) / yprocs + 2; // nunber of grid points in block column |
---|
| 386 | |
---|
| 387 | d_size = x_part * y_part * sizeof(double) + y_part * sizeof(double *); |
---|
| 388 | |
---|
| 389 | global = (struct global_struct *)malloc(sizeof(struct global_struct)); |
---|
| 390 | |
---|
| 391 | // allocate in each cluster(x,y) the arrays of pointers ** |
---|
| 392 | for (x = 0; x < xprocs; x++) |
---|
| 393 | { |
---|
| 394 | for (y = 0; y < yprocs; y++) |
---|
| 395 | { |
---|
| 396 | long procid = y * xprocs + x; |
---|
| 397 | long cx = (procs_per_cluster == 1) ? x : x>>1; |
---|
| 398 | long cy = (procs_per_cluster == 1) ? y : y>>1; |
---|
| 399 | |
---|
| 400 | psi[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 401 | psi[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 402 | psim[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 403 | psim[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 404 | psium[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 405 | psilm[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 406 | psib[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 407 | ga[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 408 | gb[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 409 | work1[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 410 | work1[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 411 | work2[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 412 | work3[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 413 | work4[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 414 | work4[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 415 | work5[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 416 | work5[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 417 | work6[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 418 | work7[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 419 | work7[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 420 | temparray[procid][0] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 421 | temparray[procid][1] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 422 | tauz[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 423 | oldga[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 424 | oldgb[procid] = (double **)remote_malloc(d_size , cx , cy); |
---|
| 425 | } |
---|
| 426 | } |
---|
| 427 | |
---|
| 428 | f = (double *)malloc(im*sizeof(double)); |
---|
| 429 | |
---|
| 430 | multi = (struct multi_struct *)malloc(sizeof(struct multi_struct)); |
---|
| 431 | |
---|
| 432 | // allocate memory for q_multi and rhs_multi |
---|
| 433 | d_size = numlev * sizeof(double **); |
---|
| 434 | if (numlev % 2 == 1) // add an extra pointer for double word alignment |
---|
| 435 | { |
---|
| 436 | d_size += sizeof(double **); |
---|
| 437 | } |
---|
| 438 | |
---|
| 439 | for (i = 0; i < numlev; i++) |
---|
| 440 | { |
---|
| 441 | d_size += ((imx[i] - 2) / yprocs + 2) * ((jmx[i] - 2) / xprocs + 2) * sizeof(double) + |
---|
| 442 | ((imx[i] - 2) / yprocs + 2) * sizeof(double *); |
---|
| 443 | } |
---|
| 444 | d_size *= nprocs; |
---|
| 445 | if (nprocs % 2 == 1) // add an extra pointer for double word alignment |
---|
| 446 | { |
---|
| 447 | d_size += sizeof(double ***); |
---|
| 448 | } |
---|
| 449 | |
---|
| 450 | d_size += nprocs * sizeof(double ***); |
---|
| 451 | q_multi = (double ****)malloc( d_size ); |
---|
| 452 | rhs_multi = (double ****)malloc( d_size ); |
---|
| 453 | |
---|
| 454 | ////////// |
---|
| 455 | link_all(); |
---|
| 456 | |
---|
| 457 | multi->err_multi = 0.0; |
---|
| 458 | i_int_coeff[0] = 0.0; |
---|
| 459 | j_int_coeff[0] = 0.0; |
---|
| 460 | |
---|
| 461 | for (i = 0; i < numlev; i++) |
---|
| 462 | { |
---|
| 463 | i_int_coeff[i] = 1.0 / (imx[i] - 1); |
---|
| 464 | j_int_coeff[i] = 1.0 / (jmx[i] - 1); |
---|
| 465 | } |
---|
| 466 | |
---|
| 467 | global->psibi = 0.0; |
---|
| 468 | |
---|
| 469 | factjacob = -1. / (12. * res * res); |
---|
| 470 | factlap = 1. / (res * res); |
---|
| 471 | eig2 = -h * f0 * f0 / (h1 * h3 * gpsr); |
---|
| 472 | |
---|
| 473 | jmm1 = jm - 1; |
---|
| 474 | ysca = ((double) jmm1) * res; |
---|
| 475 | |
---|
| 476 | im = (imx[numlev-1]-2)/yprocs + 2; |
---|
| 477 | jm = (jmx[numlev-1]-2)/xprocs + 2; |
---|
| 478 | |
---|
| 479 | init_time = giet_proctime() - start_time; |
---|
| 480 | |
---|
| 481 | printf("\n[OCEAN] initialisation completed / start parallel execution\n"); |
---|
| 482 | |
---|
| 483 | /////////////////////////////////////////////////// |
---|
| 484 | // launch (N-1) other threads to execute slave() |
---|
| 485 | /////////////////////////////////////////////////// |
---|
| 486 | |
---|
| 487 | for (i = 1 ; i < nprocs ; i++) |
---|
| 488 | { |
---|
| 489 | thread_user[i] = i; |
---|
| 490 | if (giet_pthread_create( &thread_kernel[i], |
---|
| 491 | NULL, |
---|
| 492 | &slave, |
---|
| 493 | &thread_user[i] )) |
---|
| 494 | { |
---|
| 495 | giet_pthread_exit("[OCEAN ERROR] in giet_pthread_create()\n"); |
---|
| 496 | } |
---|
| 497 | } |
---|
| 498 | |
---|
| 499 | // main itself execute slave() |
---|
| 500 | thread_user[0] = 0; |
---|
| 501 | slave( &thread_user[0] ); |
---|
| 502 | |
---|
| 503 | // wait other threads completion |
---|
| 504 | for ( i = 1 ; i < nprocs ; i++ ) |
---|
| 505 | { |
---|
| 506 | if ( giet_pthread_join( thread_kernel[i], NULL ) ) |
---|
| 507 | { |
---|
| 508 | giet_pthread_exit( "[OCEAN ERROR] in giet_pthread_join()\n" ); |
---|
| 509 | } |
---|
| 510 | } |
---|
| 511 | |
---|
| 512 | /////////////////////////////////////////////// |
---|
| 513 | // instrumentation (display & save on disk) |
---|
| 514 | /////////////////////////////////////////////// |
---|
| 515 | |
---|
| 516 | char string[256]; |
---|
| 517 | |
---|
[813] | 518 | snprintf( string , 256 , "/home/ocean_%d_%d_%d_%d_d" , |
---|
| 519 | mesh_x_size , mesh_y_size , procs_per_cluster , DEFAULT_M , DEFAULT_M ); |
---|
[799] | 520 | |
---|
| 521 | // open instrumentation file |
---|
| 522 | unsigned int fd = giet_fat_open( string , O_CREAT ); |
---|
| 523 | if ( fd < 0 ) |
---|
| 524 | { |
---|
| 525 | printf("\n[OCEAN ERROR] cannot open instrumentation file %s\n", string ); |
---|
| 526 | giet_pthread_exit( NULL ); |
---|
| 527 | } |
---|
| 528 | |
---|
[806] | 529 | snprintf( string , 256 , "\n--- OCEAN : (%dx%dx%d) procs on (%dx%d) grid ---\n", |
---|
[799] | 530 | mesh_x_size, mesh_y_size, procs_per_cluster , |
---|
| 531 | DEFAULT_M , DEFAULT_M ); |
---|
| 532 | |
---|
| 533 | giet_tty_printf( "%s" , string ); |
---|
| 534 | giet_fat_fprintf( fd , "%s" , string ); |
---|
| 535 | |
---|
| 536 | // compute instrumentation results |
---|
| 537 | long min_total = gps[0]->total_time; |
---|
| 538 | long max_total = gps[0]->total_time; |
---|
| 539 | long min_multi = gps[0]->multi_time; |
---|
| 540 | long max_multi = gps[0]->multi_time; |
---|
| 541 | long min_sync = gps[0]->sync_time; |
---|
| 542 | long max_sync = gps[0]->sync_time; |
---|
| 543 | |
---|
| 544 | for (i = 1 ; i < nprocs ; i++) |
---|
| 545 | { |
---|
| 546 | if (gps[i]->total_time > max_total) max_total = (gps[i]->total_time); |
---|
| 547 | if (gps[i]->total_time < min_total) min_total = (gps[i]->total_time); |
---|
| 548 | if (gps[i]->multi_time > max_multi) max_multi = (gps[i]->multi_time); |
---|
| 549 | if (gps[i]->multi_time < min_multi) min_multi = (gps[i]->multi_time); |
---|
| 550 | if (gps[i]->sync_time > max_sync ) max_sync = (gps[i]->sync_time ); |
---|
| 551 | if (gps[i]->sync_time < min_sync ) min_sync = (gps[i]->sync_time ); |
---|
| 552 | } |
---|
| 553 | |
---|
| 554 | snprintf( string , 256 , "\n Init Time Total Time Multi Time Sync Time\n" |
---|
| 555 | "MIN : %d | %d | %d | %d (cycles)\n" |
---|
| 556 | "MAX : %d | %d | %d | %d (cycles)\n", |
---|
| 557 | (int)init_time, (int)min_total, (int)min_multi, (int)min_sync, |
---|
| 558 | (int)init_time, (int)max_total, (int)max_multi, (int)max_sync ); |
---|
| 559 | |
---|
| 560 | giet_tty_printf("%s" , string ); |
---|
| 561 | giet_fat_fprintf( fd , "%s" , string ); |
---|
| 562 | |
---|
| 563 | for (i = 0; i < 10; i++) |
---|
| 564 | { |
---|
| 565 | long phase_time = 0; |
---|
| 566 | for (j = 0; j < nprocs; j++) |
---|
| 567 | { |
---|
| 568 | phase_time += gps[j]->steps_time[i]; |
---|
| 569 | } |
---|
| 570 | snprintf( string , 256 , " - Phase %d : %d cycles\n", |
---|
| 571 | (int)i , (int)(phase_time/nprocs) ); |
---|
| 572 | giet_tty_printf("%s" , string ); |
---|
| 573 | giet_fat_fprintf( fd , "%s" , string ); |
---|
| 574 | } |
---|
| 575 | |
---|
[806] | 576 | // close instrumentation file and exit |
---|
| 577 | giet_fat_close( fd ); |
---|
| 578 | |
---|
[799] | 579 | giet_pthread_exit("main completed"); |
---|
| 580 | |
---|
| 581 | return 0; |
---|
| 582 | |
---|
| 583 | } // end main() |
---|
| 584 | |
---|
| 585 | |
---|
| 586 | // Local Variables: |
---|
| 587 | // tab-width: 4 |
---|
| 588 | // c-basic-offset: 4 |
---|
| 589 | // c-file-offsets:((innamespace . 0)(inline-open . 0)) |
---|
| 590 | // indent-tabs-mode: nil |
---|
| 591 | // End: |
---|
| 592 | |
---|
| 593 | // vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4 |
---|