/*************************************************************************/
/*                                                                       */
/*  Copyright (c) 1994 Stanford University                               */
/*                                                                       */
/*  All rights reserved.                                                 */
/*                                                                       */
/*  Permission is given to use, copy, and modify this software for any   */
/*  non-commercial purpose as long as this copyright notice is not       */
/*  removed.  All other uses, including redistribution in whole or in    */
/*  part, are forbidden without prior written permission.                */
/*                                                                       */
/*  This software is provided with absolutely no warranty and no         */
/*  support.                                                             */
/*                                                                       */
/*************************************************************************/

/*************************************************************************/
/*                                                                       */
/*  SPLASH Ocean Code                                                    */
/*                                                                       */
/*  This application studies the role of eddy and boundary currents in   */
/*  influencing large-scale ocean movements. This implementation uses    */
/*  dynamically allocated four-dimensional arrays for grid data storage. */
/*                                                                       */
/*  Main parameters are :                                                */
/*                                                                       */
/*    - M : simulate an MxM ocean; M must be (a power of 2) + 2.         */
/*    - N : N = number of threads; N must be a power of 4.               */
/*    - E : E = error tolerance for iterative relaxation.                */
/*    - R : R = distance between grid points in meters.                  */
/*    - T : T = timestep in seconds.                                     */
/*                                                                       */
/*************************************************************************/

///////////////////////////////////////////////////////////////////////////
// This is the port of the SPLASH Ocean application on the GIET-VM
// operating system, for the TSAR manycore architecture.
// Done by Alain Greiner (march 2016).
///////////////////////////////////////////////////////////////////////////

#define DEFAULT_M       258
#define DEFAULT_E       1e-7
#define DEFAULT_T       28800.0
#define DEFAULT_R       20000.0

#define UP              0
#define DOWN            1
#define LEFT            2
#define RIGHT           3
#define UPLEFT          4
#define UPRIGHT         5
#define DOWNLEFT        6
#define DOWNRIGHT       7

#define PAGE_SIZE       4096
#define MAX_THREADS     1024

#include "decs.h"

// GIET-VM user-level headers (assumed names, to be checked against the
// GIET-VM library tree : the stdio / stdlib / malloc / lock / barrier
// services are all used below)
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <user_lock.h>
#include <user_barrier.h>

// GIET specific global variables

pthread_t           thread_kernel[MAX_THREADS];  // identifier defined by the kernel
long                thread_user[MAX_THREADS];    // user index = x*yprocs + y

user_lock_t         tty_lock;

giet_sqt_barrier_t  barrier;

sqt_lock_t          id_lock;
sqt_lock_t          psiai_lock;
sqt_lock_t          psibi_lock;
sqt_lock_t          done_lock;
sqt_lock_t          error_lock;
sqt_lock_t          bar_lock;

// OCEAN global variables

struct multi_struct*     multi;
struct global_struct*    global;
struct Global_Private**  gps;       // array of pointers[nprocs]

double ****  psi;
double ****  psim;
double ***   psium;
double ***   psilm;
double ***   psib;
double ***   ga;
double ***   gb;
double ****  work1;
double ***   work2;
double ***   work3;
double ****  work4;
double ****  work5;
double ***   work6;
double ****  work7;
double ****  temparray;
double ***   tauz;
double ***   oldga;
double ***   oldgb;
double *     f;
double ****  q_multi;
double ****  rhs_multi;

long    xprocs;     // number of blocks in a row (one block per proc)
long    yprocs;     // number of blocks in a col (one block per proc)
long    nprocs;     // total number of blocks (number of procs)
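// The scalar parameters below keep the original SPLASH-2 Ocean defaults.
// res (R) is the grid spacing in meters and dtau (T) the physical timestep
// in seconds, as stated in the header above; h1, h3 and h = h1 + h3 appear
// to be the upper / lower / total layer thicknesses of the two-layer
// quasi-geostrophic model, f0 the Coriolis parameter, beta its meridional
// gradient, and gpsr the reduced gravity.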
const double  h1 = 1000.0;
const double  h3 = 4000.0;
const double  h = 5000.0;
const double  lf = -5.12e11;
double        res = DEFAULT_R;
double        dtau = DEFAULT_T;
const double  f0 = 8.3e-5;
const double  beta = 2.0e-11;
const double  gpsr = 0.02;
long          im = DEFAULT_M;       // number of grid points in a row
long          jm = DEFAULT_M;       // number of grid points in a column
double        tolerance = DEFAULT_E;
double        eig2;
double        ysca;
long          jmm1;
const double  pi = 3.141592653589793;
const double  dt0 = 0.5e-4;
const double  outday0 = 1.0;
const double  outday1 = 2.0;
const double  outday2 = 2.0;
const double  outday3 = 2.0;
double        factjacob;
double        factlap;
long          numlev;
long *        imx;            // array[numlev]
long *        jmx;            // array[numlev]
double *      lev_res;        // array[numlev]
double *      lev_tol;        // array[numlev]
const double  maxwork = 10000.0;
double *      i_int_coeff;
double *      j_int_coeff;
long *        xpts_per_proc;
long *        ypts_per_proc;
long          minlevel;

///////////////////////////////////////////
__attribute__ ((constructor)) int main()
///////////////////////////////////////////
{
    long      x;              // index to scan xprocs
    long      y;              // index to scan yprocs
    long      i;              // index to scan numlev
    long      j;              // index to scan phases
    long      x_part;
    long      y_part;
    long      d_size;
    long      itemp;
    long      jtemp;
    long      start_time;
    long      init_time;

    start_time = giet_proctime();

    // compute number of threads : nprocs
    // as we want one thread per processor, it depends on the
    // hardware architecture x_size / y_size / procs per cluster
    unsigned int mesh_x_size;
    unsigned int mesh_y_size;
    unsigned int procs_per_cluster;

    giet_procs_number(&mesh_x_size, &mesh_y_size, &procs_per_cluster);
    nprocs = mesh_x_size * mesh_y_size * procs_per_cluster;

    giet_pthread_assert( (procs_per_cluster == 1) || (procs_per_cluster == 4),
                         "[OCEAN ERROR] number of procs per cluster must be 1 or 4");

    giet_pthread_assert( (mesh_x_size == 1) || (mesh_x_size == 2) || (mesh_x_size == 4) ||
                         (mesh_x_size == 8) || (mesh_x_size == 16),
                         "[OCEAN ERROR] mesh_x_size must be 1,2,4,8,16");

    giet_pthread_assert( (mesh_y_size == 1) || (mesh_y_size == 2) || (mesh_y_size == 4) ||
                         (mesh_y_size == 8) || (mesh_y_size == 16),
                         "[OCEAN ERROR] mesh_y_size must be 1,2,4,8,16");

    giet_pthread_assert( (mesh_x_size == mesh_y_size),
                         "[OCEAN ERROR] mesh_x_size and mesh_y_size must be equal");

    // check the ocean size : for the MxM grid, (M-2) must be a power of 2
    giet_pthread_assert( (im == 34) || (im == 66) || (im == 130) ||
                         (im == 258) || (im == 514) || (im == 1026),
                         "[OCEAN ERROR] grid side must be 34,66,130,258,514,1026");

    // initialise distributed heap
    for ( x = 0 ; x < mesh_x_size ; x++ )
    {
        for ( y = 0 ; y < mesh_y_size ; y++ )
        {
            heap_init( x , y );
        }
    }

    // allocate shared TTY & initialise tty_lock
    giet_tty_alloc( 1 );
    lock_init( &tty_lock );

    giet_tty_printf("\n[OCEAN] simulation with W-cycle multigrid solver\n"
                    "    Processors : %d x %d x %d\n"
                    "    Grid size  : %d x %d\n",
                    mesh_x_size , mesh_y_size , procs_per_cluster, im , jm );

    // initialise distributed barrier
    sqt_barrier_init( &barrier , mesh_x_size , mesh_y_size , procs_per_cluster );

    // initialize distributed locks
    sqt_lock_init( &id_lock    , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &psiai_lock , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &psibi_lock , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &done_lock  , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &error_lock , mesh_x_size , mesh_y_size , procs_per_cluster );
    sqt_lock_init( &bar_lock   , mesh_x_size , mesh_y_size , procs_per_cluster );

    // allocate thread_kernel[] array : thread identifiers defined by the kernel
    pthread_t* thread_kernel = malloc( nprocs * sizeof(pthread_t) );

    // allocate thread_user[] array : continuous thread index defined by the user
    long* thread_user = malloc( nprocs * sizeof(long) );

    // compute number of blocks per row and per column : nprocs = xprocs * yprocs
    if (procs_per_cluster == 1)
    {
        xprocs = mesh_x_size;
        yprocs = mesh_y_size;
    }
    else
    {
        xprocs = mesh_x_size * 2;
        yprocs = mesh_y_size * 2;
    }

    // compute numlev
    minlevel = 0;
    itemp    = 1;
    jtemp    = 1;
    numlev   = 0;
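    // Worked example (assuming the default 258x258 grid on a 2x2 cluster mesh
    // with 4 processors per cluster, i.e. xprocs = yprocs = 4) : itemp doubles
    // from 1 to 256, and a level is counted whenever each processor still owns
    // more than one grid point per dimension, so numlev = 6, with grid sides
    // 258, 130, 66, 34, 18 and 10 ; the coarsest level keeps 2x2 points per
    // processor, which is the minimum accepted by the assertion below.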
    minlevel = 0;
    while (itemp < (im - 2))
    {
        itemp = itemp * 2;
        jtemp = jtemp * 2;
        if ((itemp / yprocs > 1) && (jtemp / xprocs > 1))
        {
            numlev++;
        }
    }

    giet_pthread_assert( (numlev > 0),
                         "[OCEAN ERROR] at least 2*2 grid points per processor");

    // allocate in cluster(0,0) arrays indexed by numlev
    imx           = (long *)  malloc(numlev * sizeof(long));
    jmx           = (long *)  malloc(numlev * sizeof(long));
    lev_res       = (double *)malloc(numlev * sizeof(double));
    lev_tol       = (double *)malloc(numlev * sizeof(double));
    i_int_coeff   = (double *)malloc(numlev * sizeof(double));
    j_int_coeff   = (double *)malloc(numlev * sizeof(double));
    xpts_per_proc = (long *)  malloc(numlev * sizeof(long));
    ypts_per_proc = (long *)  malloc(numlev * sizeof(long));

    // initialize these arrays
    imx[numlev - 1]     = im;
    jmx[numlev - 1]     = jm;
    lev_res[numlev - 1] = res;
    lev_tol[numlev - 1] = tolerance;

    for (i = numlev - 2; i >= 0; i--)
    {
        imx[i]     = ((imx[i + 1] - 2) / 2) + 2;
        jmx[i]     = ((jmx[i + 1] - 2) / 2) + 2;
        lev_res[i] = lev_res[i + 1] * 2;
    }

    for (i = 0; i < numlev; i++)
    {
        xpts_per_proc[i] = (jmx[i] - 2) / xprocs;
        ypts_per_proc[i] = (imx[i] - 2) / yprocs;
    }

    for (i = numlev - 1; i >= 0; i--)
    {
        if ((xpts_per_proc[i] < 2) || (ypts_per_proc[i] < 2))
        {
            minlevel = i + 1;
            break;
        }
    }

    // allocate in cluster(0,0) arrays of pointers ****
    d_size    = nprocs * sizeof(double ***);
    psi       = (double ****)malloc(d_size);
    psim      = (double ****)malloc(d_size);
    work1     = (double ****)malloc(d_size);
    work4     = (double ****)malloc(d_size);
    work5     = (double ****)malloc(d_size);
    work7     = (double ****)malloc(d_size);
    temparray = (double ****)malloc(d_size);

    // allocate in each cluster(x,y) arrays of pointers ****
    d_size = 2 * sizeof(double **);
    for (x = 0; x < xprocs; x++)
    {
        for (y = 0; y < yprocs; y++)
        {
            long procid = y * xprocs + x;
            long cx = (procs_per_cluster == 1) ? x : x >> 1;
            long cy = (procs_per_cluster == 1) ? y : y >> 1;

            psi[procid]       = (double ***)remote_malloc(d_size , cx , cy);
            psim[procid]      = (double ***)remote_malloc(d_size , cx , cy);
            work1[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            work4[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            work5[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            work7[procid]     = (double ***)remote_malloc(d_size , cx , cy);
            temparray[procid] = (double ***)remote_malloc(d_size , cx , cy);
        }
    }

    // allocate in cluster(0,0) arrays of pointers ***
    d_size = nprocs * sizeof(double **);
    psium  = (double ***)malloc(d_size);
    psilm  = (double ***)malloc(d_size);
    psib   = (double ***)malloc(d_size);
    ga     = (double ***)malloc(d_size);
    gb     = (double ***)malloc(d_size);
    work2  = (double ***)malloc(d_size);
    work3  = (double ***)malloc(d_size);
    work6  = (double ***)malloc(d_size);
    tauz   = (double ***)malloc(d_size);
    oldga  = (double ***)malloc(d_size);
    oldgb  = (double ***)malloc(d_size);

    // allocate in cluster(0,0) array of pointers gps[nprocs]
    gps = (struct Global_Private**)malloc((nprocs + 1) * sizeof(struct Global_Private*));

    // allocate in each cluster(x,y) the gps[procid] structures
    for (x = 0; x < xprocs; x++)
    {
        for (y = 0; y < yprocs; y++)
        {
            long procid = y * xprocs + x;
            long cx = (procs_per_cluster == 1) ? x : x >> 1;
            long cy = (procs_per_cluster == 1) ? y : y >> 1;
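            // With 4 processors per cluster, the logical processor grid
            // (xprocs x yprocs) is twice the cluster mesh in each dimension,
            // so processor (x,y) belongs to cluster (x>>1 , y>>1), and
            // remote_malloc() places its private data in that cluster's
            // memory (same mapping as in the allocation loops above).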
            gps[procid] = (struct Global_Private*)remote_malloc(
                              sizeof(struct Global_Private) , cx , cy);

            gps[procid]->rel_num_x = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rel_num_y = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->eist      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->ejst      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->oist      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->ojst      = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rlist     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rljst     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rlien     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);
            gps[procid]->rljen     = (long *)remote_malloc(numlev * sizeof(long) , cx , cy);

            gps[procid]->multi_time = 0;
            gps[procid]->total_time = 0;
            gps[procid]->sync_time  = 0;

            gps[procid]->steps_time[0] = 0;
            gps[procid]->steps_time[1] = 0;
            gps[procid]->steps_time[2] = 0;
            gps[procid]->steps_time[3] = 0;
            gps[procid]->steps_time[4] = 0;
            gps[procid]->steps_time[5] = 0;
            gps[procid]->steps_time[6] = 0;
            gps[procid]->steps_time[7] = 0;
            gps[procid]->steps_time[8] = 0;
            gps[procid]->steps_time[9] = 0;
        }
    }

    ////////////
    subblock();

    x_part = (jm - 2) / xprocs + 2;     // number of grid points in a block row
    y_part = (im - 2) / yprocs + 2;     // number of grid points in a block column

    d_size = x_part * y_part * sizeof(double) + y_part * sizeof(double *);

    global = (struct global_struct *)malloc(sizeof(struct global_struct));

    // allocate in each cluster(x,y) the arrays of pointers **
    for (x = 0; x < xprocs; x++)
    {
        for (y = 0; y < yprocs; y++)
        {
            long procid = y * xprocs + x;
            long cx = (procs_per_cluster == 1) ? x : x >> 1;
            long cy = (procs_per_cluster == 1) ? y : y >> 1;
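            // Each 2D sub-grid below is allocated as a single contiguous block
            // of d_size bytes : y_part row pointers followed by the
            // x_part * y_part data items (the row pointers are presumably
            // wired to the data area later, by link_all()).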
            psi[procid][0]       = (double **)remote_malloc(d_size , cx , cy);
            psi[procid][1]       = (double **)remote_malloc(d_size , cx , cy);
            psim[procid][0]      = (double **)remote_malloc(d_size , cx , cy);
            psim[procid][1]      = (double **)remote_malloc(d_size , cx , cy);
            psium[procid]        = (double **)remote_malloc(d_size , cx , cy);
            psilm[procid]        = (double **)remote_malloc(d_size , cx , cy);
            psib[procid]         = (double **)remote_malloc(d_size , cx , cy);
            ga[procid]           = (double **)remote_malloc(d_size , cx , cy);
            gb[procid]           = (double **)remote_malloc(d_size , cx , cy);
            work1[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work1[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            work2[procid]        = (double **)remote_malloc(d_size , cx , cy);
            work3[procid]        = (double **)remote_malloc(d_size , cx , cy);
            work4[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work4[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            work5[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work5[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            work6[procid]        = (double **)remote_malloc(d_size , cx , cy);
            work7[procid][0]     = (double **)remote_malloc(d_size , cx , cy);
            work7[procid][1]     = (double **)remote_malloc(d_size , cx , cy);
            temparray[procid][0] = (double **)remote_malloc(d_size , cx , cy);
            temparray[procid][1] = (double **)remote_malloc(d_size , cx , cy);
            tauz[procid]         = (double **)remote_malloc(d_size , cx , cy);
            oldga[procid]        = (double **)remote_malloc(d_size , cx , cy);
            oldgb[procid]        = (double **)remote_malloc(d_size , cx , cy);
        }
    }

    f = (double *)malloc(im * sizeof(double));

    multi = (struct multi_struct *)malloc(sizeof(struct multi_struct));

    // allocate memory for q_multi and rhs_multi
    d_size = numlev * sizeof(double **);
    if (numlev % 2 == 1)        // add an extra pointer for double word alignment
    {
        d_size += sizeof(double **);
    }
    for (i = 0; i < numlev; i++)
    {
        d_size += ((imx[i] - 2) / yprocs + 2) * ((jmx[i] - 2) / xprocs + 2) * sizeof(double) +
                  ((imx[i] - 2) / yprocs + 2) * sizeof(double *);
    }

    d_size *= nprocs;

    if (nprocs % 2 == 1)        // add an extra pointer for double word alignment
    {
        d_size += sizeof(double ***);
    }

    d_size += nprocs * sizeof(double ***);

    q_multi   = (double ****)malloc( d_size );
    rhs_multi = (double ****)malloc( d_size );

    //////////
    link_all();

    multi->err_multi = 0.0;
    i_int_coeff[0]   = 0.0;
    j_int_coeff[0]   = 0.0;

    for (i = 0; i < numlev; i++)
    {
        i_int_coeff[i] = 1.0 / (imx[i] - 1);
        j_int_coeff[i] = 1.0 / (jmx[i] - 1);
    }

    global->psibi = 0.0;

    factjacob = -1. / (12. * res * res);
    factlap   =  1. / (res * res);
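    // Note : factjacob and factlap above appear to be the finite-difference
    // scale factors of the Arakawa Jacobian (hence the 1/12 weight) and of the
    // Laplacian, i.e. -1/(12*res^2) and 1/res^2 for a grid spacing of res
    // meters; eig2 below combines the layer thicknesses, the Coriolis
    // parameter and the reduced gravity.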
    eig2 = -h * f0 * f0 / (h1 * h3 * gpsr);

    jmm1 = jm - 1;
    ysca = ((double) jmm1) * res;

    im = (imx[numlev - 1] - 2) / yprocs + 2;
    jm = (jmx[numlev - 1] - 2) / xprocs + 2;

    init_time = giet_proctime() - start_time;

    printf("\n[OCEAN] initialisation completed / start parallel execution\n");

    ///////////////////////////////////////////////////
    // launch (N-1) other threads to execute slave()
    ///////////////////////////////////////////////////

    for (i = 1 ; i < nprocs ; i++)
    {
        thread_user[i] = i;
        if (giet_pthread_create( &thread_kernel[i],
                                 NULL,
                                 &slave,
                                 &thread_user[i] ))
        {
            giet_pthread_exit("[OCEAN ERROR] in giet_pthread_create()\n");
        }
    }

    // main itself executes slave()
    thread_user[0] = 0;
    slave( &thread_user[0] );

    // wait for completion of the other threads
    for ( i = 1 ; i < nprocs ; i++ )
    {
        if ( giet_pthread_join( thread_kernel[i], NULL ) )
        {
            giet_pthread_exit( "[OCEAN ERROR] in giet_pthread_join()\n" );
        }
    }

    ///////////////////////////////////////////////
    // instrumentation (display & save on disk)
    ///////////////////////////////////////////////

    char string[256];

    snprintf( string , 256 , "/home/ocean_%d_%d_%d" ,
              mesh_x_size , mesh_y_size , procs_per_cluster );

    // open instrumentation file
    int fd = giet_fat_open( string , O_CREAT );
    if ( fd < 0 )
    {
        printf("\n[OCEAN ERROR] cannot open instrumentation file %s\n", string );
        giet_pthread_exit( NULL );
    }

    snprintf( string , 256 ,
              "\n--- OCEAN : (%dx%dx%d) procs on (%dx%d) grid ---\n",
              mesh_x_size, mesh_y_size, procs_per_cluster , DEFAULT_M , DEFAULT_M );

    giet_tty_printf( "%s" , string );
    giet_fat_fprintf( fd , "%s" , string );

    // compute instrumentation results
    long min_total = gps[0]->total_time;
    long max_total = gps[0]->total_time;
    long min_multi = gps[0]->multi_time;
    long max_multi = gps[0]->multi_time;
    long min_sync  = gps[0]->sync_time;
    long max_sync  = gps[0]->sync_time;

    for (i = 1 ; i < nprocs ; i++)
    {
        if (gps[i]->total_time > max_total) max_total = (gps[i]->total_time);
        if (gps[i]->total_time < min_total) min_total = (gps[i]->total_time);
        if (gps[i]->multi_time > max_multi) max_multi = (gps[i]->multi_time);
        if (gps[i]->multi_time < min_multi) min_multi = (gps[i]->multi_time);
        if (gps[i]->sync_time  > max_sync ) max_sync  = (gps[i]->sync_time );
        if (gps[i]->sync_time  < min_sync ) min_sync  = (gps[i]->sync_time );
    }

    snprintf( string , 256 ,
              "\n      Init Time    Total Time    Multi Time    Sync Time\n"
              "MIN : %d | %d | %d | %d   (cycles)\n"
              "MAX : %d | %d | %d | %d   (cycles)\n",
              (int)init_time, (int)min_total, (int)min_multi, (int)min_sync,
              (int)init_time, (int)max_total, (int)max_multi, (int)max_sync );

    giet_tty_printf("%s" , string );
    giet_fat_fprintf( fd , "%s" , string );

    for (i = 0; i < 10; i++)
    {
        long phase_time = 0;
        for (j = 0; j < nprocs; j++)
        {
            phase_time += gps[j]->steps_time[i];
        }
        snprintf( string , 256 , " - Phase %d : %d cycles\n",
                  (int)i , (int)(phase_time / nprocs) );
        giet_tty_printf("%s" , string );
        giet_fat_fprintf( fd , "%s" , string );
    }

    // close instrumentation file and exit
    giet_fat_close( fd );

    giet_pthread_exit("main completed");

    return 0;

} // end main()

// Local Variables:
// tab-width: 4
// c-basic-offset: 4
// c-file-offsets:((innamespace . 0)(inline-open . 0))
// indent-tabs-mode: nil
// End:

// vim: filetype=cpp:expandtab:shiftwidth=4:tabstop=4:softtabstop=4