/*
 * NOTE(review): this text is NOT compilable C — it is a unified-diff residue
 * (raw `- ` / `+ ` hunk lines are embedded in the source) of what appears to
 * be a SimGrid/SMPI port of the NAS EP (Embarrassingly Parallel) benchmark:
 * plain malloc/free are being replaced by SMPI_SHARED_MALLOC/SMPI_SHARED_FREE
 * and the hot loop by SMPI_SAMPLE_GLOBAL. The diff must be resolved (keep the
 * `+` lines, drop the `-` lines and the markers) before this can build.
 * Large parts of the function body are elided between hunks: the `}` lines
 * after the fprintf calls and after the randlc call close blocks whose
 * openings are not visible here, and main() itself never closes in this
 * fragment. Identifiers root, TRUE, FALSE, randlc, timer_read are presumably
 * declared in the elided portions or project headers — TODO confirm.
 */
int main(int argc, char **argv) {
double dum[3] = {1.,1.,1.};
double x1, x2, sx, sy, tm, an, gc;
- XBT_ATTRIB_UNUSED double tt;
double Mops;
double epsilon=1.0E-8, a = 1220703125., s=271828183.;
double t1, t2, t3, t4;
int m;
int mk=16;
/* nk = 2^mk random-pair batches; pow() on doubles then truncated to int */
int nk = (int)(pow(2,mk)),
- nq=10,
+ nq=10,
np, node, no_nodes, i, ik, kk, l, k, nit, no_large_nodes, np_add, k_offset;
int verified;
char size[500]; // mind the size of the string to represent a big number
/* Diff hunk: heap buffers x (2*nk doubles) and q (nq doubles) move from
 * plain malloc to SMPI shared allocations so SimGrid can fold the memory. */
- double *x = (double *) malloc (2*nk*sizeof(double));
- double *q = (double *) malloc (nq*sizeof(double));
+ double *x = (double *) SMPI_SHARED_MALLOC (2*nk*sizeof(double));
+ double *q = (double *) SMPI_SHARED_MALLOC (nq*sizeof(double));
MPI_Init( &argc, &argv );
MPI_Comm_size( MPI_COMM_WORLD, &no_nodes);
/* NOTE(review): `size` is printed here but never written in the visible
 * code — presumably formatted (sprintf of 2^(mk+1)) in an elided section;
 * if not, this reads an uninitialized buffer. TODO confirm. */
fprintf(stdout," Number of random numbers generated: %s\n",size);
fprintf(stdout," Number of active processes: %d\n",no_nodes);
/* NOTE(review): closes a block whose opening (likely `if(node==root){`)
 * is elided from this fragment. */
}
- verified = FALSE;
/* Compute the number of "batches" of random number pairs generated per processor. Adjust if the number of processors
 * does not evenly divide the total number */
for (i=0;i<2*nk;i++) {
x[i] = -1e99;   /* sentinel fill before the random-number generation */
}
/* NOTE(review): abs(1) is the integer abs of a constant, so this is
 * log(sqrt(1)) == 0 — looks like a timer-warmup/placeholder line being
 * removed by the diff rather than a real computation. */
- Mops = log(sqrt(abs(1)));
/* Synchronize before placing time stamp */
MPI_Barrier( MPI_COMM_WORLD );
t2 = randlc(&t1, &t1);
/* NOTE(review): another closing brace for an elided opening (probably the
 * loop that advances the generator to this rank's starting seed). */
}
an = t1;
- tt = s;
gc = 0;
- tt = 0.;
sx = 0.;
sy = 0.;
for (i=0; i < nq ; i++) {
/* Starting batch offset for this rank; first no_large_nodes ranks carry
 * one extra batch (np+1) when no_nodes does not divide the total evenly. */
k_offset = no_large_nodes*(np+1) + (node-no_large_nodes)*np -1;
int stop = FALSE;
/* Diff hunk: the per-batch loop is wrapped in SMPI_SAMPLE_GLOBAL so SimGrid
 * benchmarks ~25% of the np iterations (rel. error 0.03) and extrapolates. */
- for(k = 1; k <= np; k++) {// SMPI_SAMPLE_LOCAL(0.25 * np, 0.03) {
+ SMPI_SAMPLE_GLOBAL(k = 1, k <= np, k++, 0.25 * np, 0.03){
stop = FALSE;
kk = k_offset + k ;
t1 = s;
/* Binary-exponentiation seed jump: advance the generator kk steps.
 * NOTE(review): reuses `i` as the loop counter, clobbering the outer
 * i < nq loop's counter — suspicious; verify against the upstream EP
 * source whether the outer loop was elided differently. */
for (i=1;i<=100 && !stop;i++) {
ik = kk / 2;
if (2 * ik != kk) {
/* Diff hunk: t3 result was never used; the assignments are dropped. */
- t3 = randlc(&t1, &t2);
+ randlc(&t1, &t2);
}
if (ik==0)
stop = TRUE;
else {
- t3 = randlc(&t2, &t2);
+ randlc(&t2, &t2);
kk = ik;
}
}
/* Global max of per-rank elapsed time; x[0] doubles as the recv scratch. */
MPI_Allreduce(&tm, x, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
tm = x[0];
- free(x);
+ SMPI_SHARED_FREE(x);
if(node == root) {
nit = 0;
fprintf(stdout,"Random numbers: %f\n",(timer_read(3)/1000));
}
- free(q);
+ SMPI_SHARED_FREE(q);
MPI_Finalize();
/* NOTE(review): main() does not close in this fragment — the matching
 * `}` (and the ends of the loops opened above) lie past the visible text. */
return 0;