SIONlib 1.7.1
Scalable I/O library for parallel access to task-local files
partest_util.c
/****************************************************************************
**  SIONLIB     http://www.fz-juelich.de/jsc/sionlib                       **
*****************************************************************************
**  Copyright (c) 2008-2016                                                 **
**  Forschungszentrum Juelich, Juelich Supercomputing Centre                **
**                                                                          **
**  See the file COPYRIGHT in the package base directory for details        **
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <mpi.h>
#include <time.h>
#include <math.h>

#include "partest_util.h"

/* Named barrier wrappers: each one synchronizes all tasks of comm at a
   distinct phase of the benchmark (start, malloc, open, write, ...). */

int barrier_after_start(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_malloc(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_open(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_write(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_read(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_close(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_before_unlink(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_unlink(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

/* Format the timing and statistics lines of one write or read phase and emit
   them either via collective_print_gather() on the work communicator
   (collective != 0) or directly to stderr on each task. */
int write_timings(char *set, int method, double *timings, sion_int64 *stats,
                  _test_communicators *communicators,
                  _test_options *options, int collective)
{
  char cbuffer1[2 * MAXCHARLEN];
  char cbuffer2[2 * MAXCHARLEN];
  char cbuffer3[2 * MAXCHARLEN];
  char cbuffer4[2 * MAXCHARLEN];

  if (method == TIMINGS_METHOD_WRITE) {
    sprintf(cbuffer1,
            "timings[%06d] T_STAT %s %s b=%10.2f MiB t=%10.6fs #chunks=%ld bw=%10.4f MB/s (%10.4f MiB/s) ion=%d nf=%d\n",
            communicators->all_rank, set, "WRITE",
            1.0 * stats[STATS_BYTES_WR_WROTE] / toMiB,
            timings[TIMINGS_WR_TOTAL],
            (long) stats[STATS_BYTES_WR_NUM_CHUNKS],
            1.0 * stats[STATS_BYTES_WR_WROTE] / toMB / timings[TIMINGS_WR_TOTAL],
            1.0 * stats[STATS_BYTES_WR_WROTE] / toMiB / timings[TIMINGS_WR_TOTAL],
            communicators->ionode_number,
            options->numfiles);

    sprintf(cbuffer2,
            "timings[%06d] T_PHASE %s %s create=%.5fs, create_cls=%.5fs, open=%.5fs, write=%.5fs, close=%.5fs, tlog=%.4fs\n",
            communicators->all_rank, set, "WRITE",
            timings[TIMINGS_WR_CREATE], timings[TIMINGS_WR_CREATE_CLOSE], timings[TIMINGS_WR_OPEN],
            timings[TIMINGS_WR_WRITE], timings[TIMINGS_WR_CLOSE], timings[TIMINGS_MSGS]);

    sprintf(cbuffer3,
            "timings[%06d] T_FILE_BARRIER %s %s open=%.4fs, write=%.4fs, close=%.4fs\n",
            communicators->all_rank, set, "WRITE",
            timings[TIMINGS_WR_OPEN_BARR_FILE], timings[TIMINGS_WR_WRITE_BARR_FILE], timings[TIMINGS_WR_CLOSE_BARR_FILE]);

    sprintf(cbuffer4,
            "timings[%06d] T_GLOBAL_BARRIER %s %s create=%.4fs, create_cls=%.4fs, open=%.4fs, write=%.4fs, close=%.4fs\n",
            communicators->all_rank, set, "WRITE",
            timings[TIMINGS_WR_CREATE_BARR_OPEN], timings[TIMINGS_WR_CREATE_BARR_CLOSE],
            timings[TIMINGS_WR_OPEN_BARR_FILE], timings[TIMINGS_WR_WRITE_BARR_FILE], timings[TIMINGS_WR_CLOSE_BARR_FILE]);
  }
  else {
    sprintf(cbuffer1,
            "timings[%06d] T_STAT %s %s b=%10.2f MiB t=%10.6fs #chunks=%ld bw=%10.4f MB/s (%10.4f MiB/s) ion=%d nf=%d\n",
            communicators->all_rank, set, "READ",
            1.0 * stats[STATS_BYTES_RD_READ] / toMiB,
            timings[TIMINGS_RD_TOTAL],
            (long) stats[STATS_BYTES_RD_NUM_CHUNKS],
            1.0 * stats[STATS_BYTES_RD_READ] / toMB / timings[TIMINGS_RD_TOTAL],
            1.0 * stats[STATS_BYTES_RD_READ] / toMiB / timings[TIMINGS_RD_TOTAL],
            communicators->ionode_number,
            options->numfiles);

    sprintf(cbuffer2,
            "timings[%06d] T_PHASE %s %s open=%.5fs, read=%.5fs, close=%.5fs, tlog=%.4fs\n",
            communicators->all_rank, set, "READ",
            timings[TIMINGS_RD_OPEN], timings[TIMINGS_RD_READ], timings[TIMINGS_RD_CLOSE], timings[TIMINGS_MSGS]);

    sprintf(cbuffer3,
            "timings[%06d] T_FILE_BARRIER %s %s open=%.4fs, read=%.4fs, close=%.4fs\n",
            communicators->all_rank, set, "READ",
            timings[TIMINGS_RD_OPEN_BARR_FILE], timings[TIMINGS_RD_READ_BARR_FILE], timings[TIMINGS_RD_CLOSE_BARR_FILE]);

    sprintf(cbuffer4,
            "timings[%06d] T_GLOBAL_BARRIER %s %s open=%.4fs, read=%.4fs, close=%.4fs\n",
            communicators->all_rank, set, "READ",
            timings[TIMINGS_RD_OPEN_BARR_FILE], timings[TIMINGS_RD_READ_BARR_FILE], timings[TIMINGS_RD_CLOSE_BARR_FILE]);
  }

  if (collective) {
    collective_print_gather(cbuffer1, communicators->work);
    collective_print_gather(cbuffer2, communicators->work);
    collective_print_gather(cbuffer3, communicators->work);
    collective_print_gather(cbuffer4, communicators->work);
  }
  else {
    fprintf(stderr, "%s", cbuffer1);
    fprintf(stderr, "%s", cbuffer2);
    fprintf(stderr, "%s", cbuffer3);
    fprintf(stderr, "%s", cbuffer4);
  }

  return (1);
}

/* Gather one MAXCHARLEN message per task on rank 0 and print them in rank
   order. Falls back to the point-to-point variant collective_print() if the
   gather buffer on rank 0 would exceed 2 MiB. */
int collective_print_gather(char *cbuffer, MPI_Comm comm)
{
  int   rank, size, p;
  char *lbuffer;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if (size * MAXCHARLEN > 2 * 1024 * 1024) {
    return (collective_print(cbuffer, comm));
  }

  if (rank == 0) {
    lbuffer = (char *) malloc(MAXCHARLEN * size);
    if (!lbuffer) {
      fprintf(stderr, "could not allocate buffer of size %d\n", MAXCHARLEN * size);
      MPI_Abort(comm, 1);
    }
  }
  else {
    lbuffer = NULL;
  }

  MPI_Gather(cbuffer, MAXCHARLEN, MPI_CHAR, lbuffer, MAXCHARLEN, MPI_CHAR, 0, comm);

  if (rank == 0) {
    for (p = 0; p < size; p++) {
      fprintf(stderr, "%s", lbuffer + p * MAXCHARLEN);
    }
    free(lbuffer);
  }

  return (1);
}

/* Serialized fallback: rank 0 prints its own line first, then wakes up every
   other rank in turn and prints the line that rank sends back. */
int collective_print(char *cbuffer, MPI_Comm comm)
{
  int        rank, size, p;
  int        dummy = 0;
  char       lbuffer[MAXCHARLEN];
  MPI_Status status;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if (rank == 0) {
    fprintf(stderr, "%s", cbuffer);

    for (p = 1; p < size; p++) {
      MPI_Send(&dummy, 1, MPI_INT, p, WAKEUP, comm);
      MPI_Recv(lbuffer, MAXCHARLEN, MPI_CHAR, p, COLPRINT, comm, &status);
      if (strlen(lbuffer) > 0)
        fprintf(stderr, "%s", lbuffer);
    }
  }
  else {
    MPI_Recv(&dummy, 1, MPI_INT, 0, WAKEUP, comm, &status);
    MPI_Send(cbuffer, MAXCHARLEN, MPI_CHAR, 0, COLPRINT, comm);
  }

  return (1);
}
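
For reference, a minimal usage sketch (hypothetical, not part of partest_util.c): it assumes partest_util.h declares these helpers together with the MAXCHARLEN constant, and shows each task formatting one line of at most MAXCHARLEN bytes and handing it to collective_print_gather() so that rank 0 prints all lines in rank order.

/* Hypothetical usage sketch, assuming partest_util.h provides the
 * declarations and MAXCHARLEN; compile with an MPI compiler and link
 * against partest_util.c. */
#include <stdio.h>
#include <string.h>
#include <mpi.h>
#include "partest_util.h"

int main(int argc, char **argv)
{
  int  rank;
  char msg[MAXCHARLEN];

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  barrier_after_start(MPI_COMM_WORLD);            /* synchronize before the phase */

  memset(msg, 0, sizeof(msg));                    /* the gather sends the full buffer */
  snprintf(msg, sizeof(msg), "hello from task %06d\n", rank);
  collective_print_gather(msg, MPI_COMM_WORLD);   /* rank 0 prints all lines in rank order */

  barrier_after_close(MPI_COMM_WORLD);            /* synchronize after the phase */
  MPI_Finalize();
  return 0;
}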