1 /*************************************************************************
3 * N A S P A R A L L E L B E N C H M A R K S 3.3 *
7 *************************************************************************
9 * This benchmark is part of the NAS Parallel Benchmark 3.3 suite. *
11 * Permission to use, copy, distribute and modify this software *
12 * for any purpose with or without fee is hereby granted. We *
13 * request, however, that all derived work reference the NAS *
14 * Parallel Benchmarks 3.3. This software is provided "as is" *
15 * without express or implied warranty. *
17 * Information on NPB 3.3, including the technical report, the *
18 * original specifications, source code, results and information *
19 * on how to submit new results, is available at: *
21 * http://www.nas.nasa.gov/Software/NPB *
23 * Send comments or suggestions to npb@nas.nasa.gov *
24 * Send bug reports to npb-bugs@nas.nasa.gov *
26 * NAS Parallel Benchmarks Group *
27 * NASA Ames Research Center *
29 * Moffett Field, CA 94035-1000 *
31 * E-mail: npb@nas.nasa.gov *
32 * Fax: (650) 604-3957 *
34 *************************************************************************
36 * Author: M. Frumkin * *
38 *************************************************************************/
46 #include "nas_common.h"
47 #include "simgrid/instr.h" //TRACE_
/* Per-process timing state: timer_on toggles node-level instrumentation,
 * timers_tot caps the number of usable timers (64); start[]/elapsed[]
 * hold the per-timer start stamps and accumulated elapsed times. */
49 int timer_on=0,timers_tot=64;
50 double start[64], elapsed[64];
/* Compare the computed L2 norm (rnm2) for benchmark bmname ("BH", "WH"
 * or "SH") against a class-dependent reference value, within epsilon.
 * Prints pass/fail diagnostics to stderr.
 * NOTE(review): this listing is elided — the file-scope 'class' variable,
 * the opening branch of the class dispatch (presumably class=='S'),
 * several closing braces and the return statement are not visible here. */
58 static int verify(char *bmname,double rnm2){
59 double verify_value=0.0;
60 double epsilon=1.0E-8;
/* Reference values for the first class (branch header elided; presumably 'S'). */
64 if(strstr(bmname,"BH")){
65 verify_value=30892725.0;
66 }else if(strstr(bmname,"WH")){
67 verify_value=67349758.0;
68 }else if(strstr(bmname,"SH")){
69 verify_value=58875767.0;
71 fprintf(stderr,"No such benchmark as %s.\n",bmname);
74 }else if(class=='W') {
75 if(strstr(bmname,"BH")){
76 verify_value = 4102461.0;
77 }else if(strstr(bmname,"WH")){
78 verify_value = 204280762.0;
79 }else if(strstr(bmname,"SH")){
80 verify_value = 186944764.0;
82 fprintf(stderr,"No such benchmark as %s.\n",bmname);
85 }else if(class=='A') {
86 if(strstr(bmname,"BH")){
87 verify_value = 17809491.0;
88 }else if(strstr(bmname,"WH")){
89 verify_value = 1289925229.0;
90 }else if(strstr(bmname,"SH")){
91 verify_value = 610856482.0;
93 fprintf(stderr,"No such benchmark as %s.\n",bmname);
96 }else if(class=='B') {
97 if(strstr(bmname,"BH")){
98 verify_value = 4317114.0;
99 }else if(strstr(bmname,"WH")){
100 verify_value = 7877279917.0;
101 }else if(strstr(bmname,"SH")){
102 verify_value = 1836863082.0;
104 fprintf(stderr,"No such benchmark as %s.\n",bmname);
/* Classes C and D have no reference values: report the norm but skip verification. */
107 }else if(class=='C' || class == 'D') {
110 fprintf(stderr,"No such class as %c.\n",class);
112 fprintf(stderr," %s L2 Norm = %f\n",bmname,rnm2);
114 fprintf(stderr," No verification was performed.\n");
/* Manual |rnm2 - verify_value| < epsilon test (fabs avoided; see note). */
115 }else if( rnm2 - verify_value < epsilon && rnm2 - verify_value > -epsilon) { /* abs here does not work on ALTIX */
117 fprintf(stderr," Deviation = %f\n",(rnm2 - verify_value));
120 fprintf(stderr," The correct verification value = %f\n",verify_value);
121 fprintf(stderr," Got value = %f\n",rnm2);
/* Modular exponentiation: a^n mod md (used to seed the pseudo-random
 * feature generators). Only the argument-validation lines are visible in
 * this elided listing: a negative exponent is reported and negated as a
 * "temp fix", and a nonpositive modulus is reported. The square-and-multiply
 * loop and return are not visible here. */
129 static int ipowMod(int a,long long int n,int md){
135 fprintf(stderr,"ipowMod: exponent must be nonnegative exp=%lld\n",n);
136 exp=-n; /* temp fix */
139 fprintf(stderr,"ipowMod: module must be positive mod=%d",md);
/* Build the "SH" (shuffle) communication graph "DT_SH.<cls>": numSources
 * source nodes, numOfLayers layers of comparator nodes wired to the previous
 * layer by mask-based index offsets, then one sink per source. Each
 * comparator/sink receives two incoming arcs from the previous layer.
 * NOTE(review): listing elided — dg allocation, node creation/attachment,
 * the ndoff computations and the return are not visible here. */
160 static DGraph *buildSH(const char cls){
161 /* Nodes of the graph must be topologically sorted to avoid MPI deadlock. */
163 unsigned int numSources=num_sources; /* must be power of 2 */
164 unsigned int numOfLayers=0;
165 unsigned int tmpS=numSources>>1;
166 int firstLayerNode=0;
169 unsigned int mask=0x0;
175 snprintf(nm,BLOCK_SIZE,"DT_SH.%c",cls);
/* Layer 0: one Source.<i> node per source. */
182 for(i=0;i<numSources;i++){
183 snprintf(nm,BLOCK_SIZE,"Source.%d",i);
/* Comparator layers: each node gets two arcs from the previous layer. */
187 for(j=0;j<numOfLayers;j++){
189 for(i=0;i<numSources;i++){
190 snprintf(nm,BLOCK_SIZE,"Comparator.%d",(i+j*firstLayerNode));
194 ndid=firstLayerNode+ndoff;
195 ar=newArc(dg->node[ndid],nd);
198 ndid=firstLayerNode+ndoff;
199 ar=newArc(dg->node[ndid],nd);
202 firstLayerNode+=numSources;
/* Final layer: Sink.<i> nodes, wired like one more shuffle stage. */
204 mask=0x00000001<<numOfLayers;
205 for(i=0;i<numSources;i++){
206 snprintf(nm,BLOCK_SIZE,"Sink.%d",i);
210 ndid=firstLayerNode+ndoff;
211 ar=newArc(dg->node[ndid],nd);
214 ndid=firstLayerNode+ndoff;
215 ar=newArc(dg->node[ndid],nd);
/* Build the "WH" (white-hole) graph "DT_WH.<cls>": numSources Sink nodes
 * first, then successively smaller comparator layers (shrinking by a factor
 * of maxInDeg=4) whose arcs point from each comparator to up to maxInDeg
 * nodes of the previous layer, and finally a single "Source" node feeding
 * the last layer. The node array is then reversed in place so nodes are
 * topologically sorted (source first) to avoid MPI deadlock.
 * NOTE(review): listing elided — dg/nd allocation, the sid computation,
 * some loop closers and the return are not visible here. */
221 static DGraph *buildWH(const char cls){
222 /* Nodes of the graph must be topologically sorted to avoid MPI deadlock. */
224 int numSources=num_sources,maxInDeg=4;
225 int numLayerNodes=numSources,firstLayerNode=0;
226 int totComparators=0;
227 int numPrevLayerNodes=numLayerNodes;
230 DGNode *nd=NULL,*source=NULL,*tmp=NULL,*snd=NULL;
234 snprintf(nm,BLOCK_SIZE,"DT_WH.%c",cls);
/* Sinks are created first; the array is reversed at the end. */
237 for(i=0;i<numSources;i++){
238 snprintf(nm,BLOCK_SIZE,"Sink.%d",i);
243 numPrevLayerNodes=numLayerNodes;
/* Comparator layers shrink by maxInDeg until <= maxInDeg nodes remain. */
244 while(numLayerNodes>maxInDeg){
245 numLayerNodes=numLayerNodes/maxInDeg;
246 if(numLayerNodes*maxInDeg<numPrevLayerNodes)
248 for(i=0;i<numLayerNodes;i++){
249 snprintf(nm,BLOCK_SIZE,"Comparator.%d",totComparators);
252 id=AttachNode(dg,nd);
253 for(j=0;j<maxInDeg;j++){
255 if(sid>=numPrevLayerNodes)
257 snd=dg->node[firstLayerNode+sid];
/* WH arcs run from the new comparator toward the previous (sink-side) layer. */
258 ar=newArc(dg->node[id],snd);
262 firstLayerNode+=numPrevLayerNodes;
263 numPrevLayerNodes=numLayerNodes;
/* Single source fanning out into the last comparator layer. */
265 source=newNode((char*)"Source");
266 AttachNode(dg,source);
267 for(i=0;i<numPrevLayerNodes;i++){
268 nd=dg->node[firstLayerNode+i];
269 ar=newArc(source,nd);
/* Reverse the node array (and fix ids) for topological order. */
273 for(i=0;i<dg->numNodes/2;i++){ /* Topological sorting */
275 dg->node[i]=dg->node[dg->numNodes-1-i];
277 dg->node[dg->numNodes-1-i]=tmp;
278 dg->node[dg->numNodes-1-i]->id=dg->numNodes-1-i;
/* Build the "BH" (black-hole) graph "DT_BH.<cls>": numSources Source nodes,
 * then successively smaller comparator layers (shrinking by maxInDeg=4)
 * with arcs from each previous-layer node INTO the new comparator (the
 * mirror of buildWH), and finally a single "Sink" node collecting the last
 * layer. Construction order is already topological (sources first).
 * NOTE(review): listing elided — dg/nd allocation, the sid computation,
 * the sink's incoming arcs and the return are not visible here. */
283 static DGraph *buildBH(const char cls){
284 /* Nodes of the graph must be topologically sorted to avoid MPI deadlock.*/
286 int numSources=num_sources,maxInDeg=4;
287 int numLayerNodes=numSources,firstLayerNode=0;
289 DGNode *nd=NULL, *snd=NULL, *sink=NULL;
291 int totComparators=0;
292 int numPrevLayerNodes=numLayerNodes;
296 snprintf(nm,BLOCK_SIZE,"DT_BH.%c",cls);
/* Layer 0: one Source.<i> node per source. */
299 for(i=0;i<numSources;i++){
300 snprintf(nm,BLOCK_SIZE,"Source.%d",i);
/* Comparator layers shrink by maxInDeg until <= maxInDeg nodes remain. */
304 while(numLayerNodes>maxInDeg){
305 numLayerNodes=numLayerNodes/maxInDeg;
306 if(numLayerNodes*maxInDeg<numPrevLayerNodes)
308 for(i=0;i<numLayerNodes;i++){
309 snprintf(nm,BLOCK_SIZE,"Comparator.%d",totComparators);
312 id=AttachNode(dg,nd);
313 for(j=0;j<maxInDeg;j++){
315 if(sid>=numPrevLayerNodes)
317 snd=dg->node[firstLayerNode+sid];
/* BH arcs run from the previous (source-side) layer into the new comparator. */
318 ar=newArc(snd,dg->node[id]);
322 firstLayerNode+=numPrevLayerNodes;
323 numPrevLayerNodes=numLayerNodes;
/* Single sink absorbing the last comparator layer. */
325 sink=newNode((char*)"Sink");
327 for(i=0;i<numPrevLayerNodes;i++){
328 nd=dg->node[firstLayerNode+i];
/* Allocate an Arr holding len doubles. NOTE(review): listing elided — the
 * arr->len assignment, the return, and any malloc-failure checks are not
 * visible here. Neither malloc result is checked on the visible lines. */
340 static Arr *newArr(int len){
341 Arr *arr=(Arr *)malloc(sizeof(Arr)); //Arr *arr=(Arr *)SMPI_SHARED_MALLOC(sizeof(Arr));
343 arr->val=(double *)malloc(len*sizeof(double)); //arr->val=(double *)SMPI_SHARED_MALLOC(len*sizeof(double));
/* Checksum of a feature array: sum of val[i]^2/len over all elements.
 * Dividing inside the loop keeps each term small for large len (see the
 * original note). NOTE(review): csum's declaration and the return are
 * elided from this listing. */
347 static double CheckVal(Arr *feat){
349 for(int i=0;i<feat->len;i++){
350 csum+=feat->val[i]*feat->val[i]/feat->len; /* The truncation does not work since result will be 0 for large len */
/* Return the feature-count distribution parameters through the out-pointers
 * mean and stdev. NOTE(review): the entire body is elided from this listing;
 * the caller (GetFeatureNum) initializes mean=num_samples, stdev=128 first. */
355 static int GetFNumDPar(int* mean, int* stdev){
361 static int GetFeatureNum(char *mbname,int id){
362 double tran=314159265.0;
364 double denom=randlc(&tran,&A);
366 int mean=num_samples,stdev=128;
368 GetFNumDPar(&mean,&stdev);
369 rtfs=ipowMod((int)(1/denom)*(int)cval,(long long int) (2*id+1),2*stdev);
370 if(rtfs<0) rtfs=-rtfs;
/* Generate the pseudo-random feature array for node id: len =
 * GetFeatureNum(...)*fdim values, produced fdim at a time by four
 * multiplicative congruential generators (seedx/y/z/f with moduli
 * nx/ny/nz/nf), all seeded deterministically from id via ipowMod.
 * NOTE(review): listing elided — the feat->val[i]=seedx store (only
 * i+1..i+3 stores are visible), timer calls and the return are missing. */
375 static Arr* RandomFeatures(char *bmname,int fdim,int id){
376 int len=GetFeatureNum(bmname,id)*fdim;
377 Arr* feat=newArr(len);
378 int nxg=2,nyg=2,nzg=2,nfg=5;
379 int nx=421,ny=419,nz=1427,nf=3527;
380 long long int expon=(len*(id+1))%3141592;
381 int seedx=ipowMod(nxg,expon,nx), seedy=ipowMod(nyg,expon,ny), seedz=ipowMod(nzg,expon,nz),seedf=ipowMod(nfg,expon,nf);
/* Advance all four LCGs once per fdim-sized feature tuple. */
387 for(int i=0;i<len;i+=fdim){
388 seedx=(seedx*nxg)%nx;
389 seedy=(seedy*nyg)%ny;
390 seedz=(seedz*nzg)%nz;
391 seedf=(seedf*nfg)%nf;
393 feat->val[i+1]=seedy;
394 feat->val[i+2]=seedz;
395 feat->val[i+3]=seedf;
399 fprintf(stderr,"** RandomFeatures time in node %d = %f\n",id,timer_read(id+1));
/* Rebin a->val in place to length blen: each interior element i is spread
 * evenly over buckets [jlo, jhi] of a freshly zeroed nval array, then nval
 * replaces a->val (the old buffer is freed).
 * NOTE(review): listing elided — the accumulation into nval[j], the
 * assignment of a->val=nval / a->len=blen, and malloc checking are not
 * visible. Note blen/a->len on the visible lines is integer division. */
404 static void Resample(Arr *a,int blen){
405 long long int i=0,j=0,jlo=0,jhi=0;
407 double *nval=(double *)malloc(blen*sizeof(double));
408 //double *nval=(double *)SMPI_SHARED_MALLOC(blen*sizeof(double));
409 for(i=0;i<blen;i++) nval[i]=0.0;
410 for(i=1;i<a->len-1;i++){
411 jlo=(int)(0.5*(2*i-1)*(blen/a->len));
412 jhi=(int)(0.5*(2*i+1)*(blen/a->len));
/* Spread the element's value evenly over its target bucket range. */
414 avval=a->val[i]/(jhi-jlo+1);
415 for(j=jlo;j<=jhi;j++){
/* Last element maps directly onto the last bucket. */
420 nval[blen-1]=a->val[a->len-1];
421 free(a->val); //SMPI_SHARED_FREE(a->val);
/* Combine stream b into accumulator a: the shorter array is first resampled
 * to the longer one's length, then for each fielddim-sized tuple the
 * squared distance between a and b is computed at the current position
 * (rms0) and at neighboring offsets j (rms1/rmsm1); the best-matching
 * tuple of b, scaled by weight=(w+1)/(w+2), overwrites a's tuple.
 * NOTE(review): listing elided — the declarations of i/j/k, the selection
 * logic choosing among rms0/rms1/rmsm1, timer calls and the return are
 * not visible here; w indexes the per-node timer used in the report. */
427 static Arr* WindowFilter(Arr *a, Arr* b,int w){
429 double rms0=0.0,rms1=0.0,rmsm1=0.0;
430 double weight=((double) (w+1))/(w+2);
/* Equalize lengths before comparing tuple-by-tuple. */
437 if(a->len<b->len) Resample(a,b->len);
438 if(a->len>b->len) Resample(b,a->len);
439 for(i=fielddim;i<a->len-fielddim;i+=fielddim){
/* Squared distance of the aligned tuple (4 components). */
440 rms0=(a->val[i]-b->val[i])*(a->val[i]-b->val[i]) +(a->val[i+1]-b->val[i+1])*(a->val[i+1]-b->val[i+1])
441 +(a->val[i+2]-b->val[i+2])*(a->val[i+2]-b->val[i+2]) +(a->val[i+3]-b->val[i+3])*(a->val[i+3]-b->val[i+3]);
/* Same distance against the tuple shifted forward (j set by elided code). */
443 rms1=(a->val[j]-b->val[j])*(a->val[j]-b->val[j]) +(a->val[j+1]-b->val[j+1])*(a->val[j+1]-b->val[j+1])
444 +(a->val[j+2]-b->val[j+2])*(a->val[j+2]-b->val[j+2]) +(a->val[j+3]-b->val[j+3])*(a->val[j+3]-b->val[j+3]);
/* ...and shifted backward. */
446 rmsm1=(a->val[j]-b->val[j])*(a->val[j]-b->val[j]) +(a->val[j+1]-b->val[j+1])*(a->val[j+1]-b->val[j+1])
447 +(a->val[j+2]-b->val[j+2])*(a->val[j+2]-b->val[j+2]) +(a->val[j+3]-b->val[j+3])*(a->val[j+3]-b->val[j+3]);
/* Best match at the aligned position: take b's tuple, scaled by weight. */
456 a->val[i]=weight*b->val[i];
457 a->val[i+1]=weight*b->val[i+1];
458 a->val[i+2]=weight*b->val[i+2];
459 a->val[i+3]=weight*b->val[i+3];
/* Best match at offset j (forward case). */
462 a->val[i]=weight*b->val[j];
463 a->val[i+1]=weight*b->val[j+1];
464 a->val[i+2]=weight*b->val[j+2];
465 a->val[i+3]=weight*b->val[j+3];
466 }else { /*if(k==-1)*/
468 a->val[i]=weight*b->val[j];
469 a->val[i+1]=weight*b->val[j+1];
470 a->val[i+2]=weight*b->val[j+2];
471 a->val[i+3]=weight*b->val[j+3];
476 fprintf(stderr,"** WindowFilter time in node %d = %f\n",(w-1),timer_read(w));
/* Ship this node's feature array to every out-neighbor that lives on a
 * different MPI rank: first the length (MPI_INT), then the values
 * (MPI_DOUBLE), tagged per-arc. Wrapped in an SMPI trace category.
 * NOTE(review): listing elided — the arc/head lookup, the tag computation,
 * the same-address (local) branch and the return are not visible here. */
481 static int SendResults(DGraph *dg,DGNode *nd,Arr *feat){
487 TRACE_smpi_set_category ("SendResults");
488 for(i=0;i<nd->outDegree;i++){
/* Remote neighbor: two-message protocol, length first so the receiver
 * can size its buffer. */
493 if(head->address!=nd->address){
494 MPI_Send(&feat->len,1,MPI_INT,head->address,tag,MPI_COMM_WORLD);
495 MPI_Send(feat->val,feat->len,MPI_DOUBLE,head->address,tag,MPI_COMM_WORLD);
499 TRACE_smpi_set_category (NULL);
/* Comparator-node work: gather the feature stream from every in-neighbor
 * (MPI_Recv from remote ranks, direct copy of tail->feat for co-located
 * ones), fold each into resfeat via WindowFilter, then integer-average
 * resfeat by the in-degree. Returns NULL for nodes with no inputs.
 * NOTE(review): listing elided — the arc/tail lookup, tag computation,
 * feat=newArr(len) before the value Recv, and the return are not visible. */
503 static Arr* CombineStreams(DGraph *dg,DGNode *nd){
504 Arr *resfeat=newArr(num_samples*fielddim);
509 Arr *feat=NULL,*featp=NULL;
511 if(nd->inDegree==0) return NULL;
512 for(i=0;i<nd->inDegree;i++){
/* Remote producer: receive length, then values, then fold and free. */
516 if(tail->address!=nd->address){
519 MPI_Recv(&len,1,MPI_INT,tail->address,tag,MPI_COMM_WORLD,&status);
521 MPI_Recv(feat->val,feat->len,MPI_DOUBLE,tail->address,tag,MPI_COMM_WORLD,&status);
522 resfeat=WindowFilter(resfeat,feat,nd->id);
/* NOTE(review): only the Arr struct is freed here, not feat->val —
 * looks like a leak, but the elided lines may free it; verify upstream. */
523 free(feat);//SMPI_SHARED_FREE(feat);
/* Co-located producer: deep-copy its feature array before filtering. */
525 featp=(Arr *)tail->feat;
526 feat=newArr(featp->len);
527 memcpy(feat->val,featp->val,featp->len*sizeof(double));
528 resfeat=WindowFilter(resfeat,feat,nd->id);
529 free(feat);//SMPI_SHARED_FREE(feat);
/* Truncating average over the in-degree (intentional int cast). */
533 for(i=0;i<resfeat->len;i++)
534 resfeat->val[i]=((int)resfeat->val[i])/nd->inDegree;
/* Reduce one feature stream to a scalar: w*CheckVal(a), truncated to int
 * so the result is node- and array-dependent for verification (w is the
 * 1-based node timer index). NOTE(review): retv's declaration, timer calls
 * and the return are elided from this listing. */
539 static double Reduce(Arr *a,int w){
545 retv=(int)(w*CheckVal(a));/* The casting needed for node and array dependent verifcation */
548 fprintf(stderr,"** Reduce time in node %d = %f\n",(w-1),timer_read(w));
/* Sink-node work: for every incoming arc, obtain the producer's feature
 * stream (MPI_Recv length+values from remote ranks, tail->feat directly
 * for co-located ones), accumulate Reduce(...) into csum, integer-average
 * by in-degree, and scale by (nd->id+1). Wrapped in an SMPI trace category.
 * NOTE(review): listing elided — csum/retv/len declarations, the arc/tail
 * lookup, tag computation, feat allocation and the return are not visible. */
553 static double ReduceStreams(DGraph *dg,DGNode *nd){
561 TRACE_smpi_set_category ("ReduceStreams");
563 for(i=0;i<nd->inDegree;i++){
/* Skip arcs that do not terminate at this node. */
565 if(ar->head!=nd) continue;
567 if(tail->address!=nd->address){
571 MPI_Recv(&len,1,MPI_INT,tail->address,tag,MPI_COMM_WORLD,&status);
573 MPI_Recv(feat->val,feat->len,MPI_DOUBLE,tail->address,tag,MPI_COMM_WORLD,&status);
574 csum+=Reduce(feat,(nd->id+1));
575 free(feat);//SMPI_SHARED_FREE(feat);
/* Co-located producer: reduce its stored features directly. */
577 csum+=Reduce(tail->feat,(nd->id+1));
/* Truncating average, mirroring CombineStreams' verification arithmetic. */
580 if(nd->inDegree>0)csum=(((long long int)csum)/nd->inDegree);
581 retv=(nd->id+1)*csum;
/* Per-rank driver: walk all graph nodes and execute the ones assigned to
 * this rank — Sources generate and send random features, Sinks reduce
 * their streams and send the checksum to rank 0, everything else combines
 * and forwards. Rank 0 then collects every sink's checksum and verifies.
 * NOTE(review): listing elided — chksum/rchksum/feat/status declarations,
 * the nd=dg->node[i] lookups, checksum accumulation on rank 0 and the
 * return of 'verified' are not visible here. */
585 static int ProcessNodes(DGraph *dg,int me){
588 int i=0,verified=0,tag;
593 TRACE_smpi_set_category ("ProcessNodes");
595 for(i=0;i<dg->numNodes;i++){
597 if(nd->address!=me) continue;
/* Role dispatch by node-name prefix. */
598 if(strstr(nd->name,"Source")){
599 nd->feat=RandomFeatures(dg->name,fielddim,nd->id);
600 SendResults(dg,nd,nd->feat);
601 }else if(strstr(nd->name,"Sink")){
602 chksum=ReduceStreams(dg,nd);
603 tag=dg->numArcs+nd->id; /* make these to avoid clash with arc tags */
604 MPI_Send(&chksum,1,MPI_DOUBLE,0,tag,MPI_COMM_WORLD);
606 feat=CombineStreams(dg,nd);
607 SendResults(dg,nd,feat);
611 TRACE_smpi_set_category ("ProcessNodes");
/* Rank 0 gathers each sink's checksum with the matching tag and verifies. */
613 if(me==0){ /* Report node */
616 for(i=0;i<dg->numNodes;i++){
618 if(!strstr(nd->name,"Sink")) continue;
619 tag=dg->numArcs+nd->id; /* make these to avoid clash with arc tags */
620 MPI_Recv(&rchksum,1,MPI_DOUBLE,nd->address,tag,MPI_COMM_WORLD,&status);
623 verified=verify(dg->name,chksum);
/* DT benchmark entry point: initialize MPI, read class/nprocs from the
 * command line, set the class-dependent problem sizes, build the requested
 * communication graph (BH/WH/SH from argv[3]), assign node i to rank i,
 * run ProcessNodes, and have rank 0 print the standard NPB results.
 * NOTE(review): listing elided — declarations of i/dg/nprocs, the
 * buildBH/WH/SH calls, timer setup, MPI_Finalize and the return are
 * not visible here. */
628 int main(int argc,char **argv ){
629 int my_rank,comm_size;
632 int verified=0, featnum=0;
633 double bytes_sent=2.0,tot_time=0.0;
635 MPI_Init( &argc, &argv );
636 MPI_Comm_rank( MPI_COMM_WORLD, &my_rank );
637 MPI_Comm_size( MPI_COMM_WORLD, &comm_size );
639 TRACE_smpi_set_category ("begin");
640 get_info(argc, argv, &nprocs, &class);
641 check_info(DT, nprocs, class);
/* Class-dependent problem sizes: samples, deviation, and source count
 * each scale up with the class letter. */
643 if (class == 'S') { num_samples=1728; deviation=128; num_sources=4; }
644 else if (class == 'W') { num_samples=1728*8; deviation=128*2; num_sources=4*2; }
645 else if (class == 'A') { num_samples=1728*64; deviation=128*4; num_sources=4*4; }
646 else if (class == 'B') { num_samples=1728*512; deviation=128*8; num_sources=4*8; }
647 else if (class == 'C') { num_samples=1728*4096; deviation=128*16; num_sources=4*16; }
648 else if (class == 'D') { num_samples=1728*4096*8; deviation=128*32; num_sources=4*32; }
650 printf("setparams: Internal error: invalid class type %c\n", class);
/* Require exactly 4 args and a known graph name (BH, WH, or SH). */
654 if(argc!=4 || (strncmp(argv[3],"BH",2)!=0 && strncmp(argv[3],"WH",2)!=0 && strncmp(argv[3],"SH",2)!=0)){
656 fprintf(stderr,"** Usage: mpirun -np N ../bin/dt.S GraphName\n");
657 fprintf(stderr,"** Where \n - N is integer number of MPI processes\n");
658 fprintf(stderr," - S is the class S, W, or A \n");
659 fprintf(stderr," - GraphName is the communication graph name BH, WH, or SH.\n");
660 fprintf(stderr," - the number of MPI processes N should not be be less than \n");
661 fprintf(stderr," the number of nodes in the graph\n");
/* Dispatch to the matching graph builder (calls elided in this listing). */
667 if(strncmp(argv[3],"BH",2)==0){
669 }else if(strncmp(argv[3],"WH",2)==0){
671 }else if(strncmp(argv[3],"SH",2)==0){
/* Node timing needs one timer per node plus one global; disable if short. */
675 if(timer_on != 0 && dg && dg->numNodes+1>timers_tot){
678 fprintf(stderr,"Not enough timers. Node timeing is off. \n");
/* Every graph node must map onto a distinct MPI rank. */
680 if(dg && dg->numNodes>comm_size){
682 fprintf(stderr,"** The number of MPI processes should not be less than \n");
683 fprintf(stderr,"** the number of nodes in the graph\n");
684 fprintf(stderr,"** Number of MPI processes = %d\n",comm_size);
685 fprintf(stderr,"** Number nodes in the graph = %d\n",dg->numNodes);
/* Simple placement: node i runs on rank i. */
690 for(i=0;i<dg->numNodes;i++){
691 dg->node[i]->address=i;
694 printf( "\n\n NAS Parallel Benchmarks 3.3 -- DT Benchmark\n\n" );
699 verified=ProcessNodes(dg,my_rank);
700 TRACE_smpi_set_category ("end");
/* Traffic volume: one feature array per arc. */
702 featnum=num_samples*fielddim;
703 bytes_sent=featnum*dg->numArcs;
707 tot_time=timer_read(0);
708 c_print_results( dg->name, class, featnum, 0, 0, dg->numNodes, 0, comm_size, tot_time, bytes_sent/tot_time,
709 "bytes transmitted", verified);