SIONlib  1.7.4
Scalable I/O library for parallel access to task-local files
partest_util.c
/****************************************************************************
**  SIONLIB     http://www.fz-juelich.de/jsc/sionlib                       **
*****************************************************************************
**  Copyright (c) 2008-2019                                                **
**  Forschungszentrum Juelich, Juelich Supercomputing Centre               **
**                                                                         **
**  See the file COPYRIGHT in the package base directory for details       **
****************************************************************************/
#define _XOPEN_SOURCE 700

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <mpi.h>
#include <time.h>
#include <math.h>

#include "partest_util.h"

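/* Phase-separation helpers: each function performs a single MPI_Barrier on
   the given communicator, so the individual benchmark phases (start, malloc,
   open, write, read, close, unlink) can be timed separately. */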
int barrier_after_start(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_malloc(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_open(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_write(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_read(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_close(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_before_unlink(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

int barrier_after_unlink(MPI_Comm comm)
{
  MPI_Barrier(comm);
  return (1);
}

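/* Format the per-task timing and bandwidth report for either the write or
   the read phase (selected via 'method') into four report lines: T_STAT
   (bytes, time, bandwidth), T_PHASE (per-phase timings), T_FILE_BARRIER and
   T_GLOBAL_BARRIER (barrier timings). The lines are printed either through a
   collective gather on the work communicator or directly to stderr. */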
int write_timings ( char *set, int method, double *timings, sion_int64 *stats,
                    _test_communicators *communicators,
                    _test_options *options, int collective ) {
  char cbuffer1[2*MAXCHARLEN];
  char cbuffer2[2*MAXCHARLEN];
  char cbuffer3[2*MAXCHARLEN];
  char cbuffer4[2*MAXCHARLEN];

  if(method == TIMINGS_METHOD_WRITE) {
    sprintf(cbuffer1,
            "timings[%06d] T_STAT %s %s b=%10.2f MiB t=%10.6fs #chunks=%ld bw=%10.4f MB/s (%10.4f MiB/s) ion=%d nf=%d\n",
            communicators->all_rank,set,"WRITE",
            1.0 * stats[STATS_BYTES_WR_WROTE] / toMiB,
            timings[TIMINGS_WR_TOTAL],
            (long) stats[STATS_BYTES_WR_NUM_CHUNKS],
            1.0 * stats[STATS_BYTES_WR_WROTE] / toMB / timings[TIMINGS_WR_TOTAL],
            1.0 * stats[STATS_BYTES_WR_WROTE] / toMiB / timings[TIMINGS_WR_TOTAL],
            communicators->ionode_number,
            options->numfiles);

    sprintf(cbuffer2,
            "timings[%06d] T_PHASE %s %s create=%.5fs, create_cls=%.5fs, open=%.5fs, write=%.5fs, close=%.5fs, tlog=%.4fs\n",
            communicators->all_rank,set,"WRITE",
            timings[TIMINGS_WR_CREATE],timings[TIMINGS_WR_CREATE_CLOSE],timings[TIMINGS_WR_OPEN],timings[TIMINGS_WR_WRITE],timings[TIMINGS_WR_CLOSE],timings[TIMINGS_MSGS]);

    sprintf(cbuffer3,
            "timings[%06d] T_FILE_BARRIER %s %s open=%.4fs, write=%.4fs, close=%.4fs\n",
            communicators->all_rank,set,"WRITE",
            timings[TIMINGS_WR_OPEN_BARR_FILE],timings[TIMINGS_WR_WRITE_BARR_FILE],timings[TIMINGS_WR_CLOSE_BARR_FILE]);

    sprintf(cbuffer4,
            "timings[%06d] T_GLOBAL_BARRIER %s %s create=%.4fs, create_cls=%.4fs, open=%.4fs, write=%.4fs, close=%.4fs\n",
            communicators->all_rank,set,"WRITE",
            timings[TIMINGS_WR_CREATE_BARR_OPEN],timings[TIMINGS_WR_CREATE_BARR_CLOSE],timings[TIMINGS_WR_OPEN_BARR_FILE],timings[TIMINGS_WR_WRITE_BARR_FILE],timings[TIMINGS_WR_CLOSE_BARR_FILE]);
  } else {
    sprintf(cbuffer1,
            "timings[%06d] T_STAT %s %s b=%10.2f MiB t=%10.6fs #chunks=%ld bw=%10.4f MB/s (%10.4f MiB/s) ion=%d nf=%d\n",
            communicators->all_rank,set,"READ",
            1.0 * stats[STATS_BYTES_RD_READ] / toMiB,
            timings[TIMINGS_RD_TOTAL],
            (long) stats[STATS_BYTES_RD_NUM_CHUNKS],
            1.0 * stats[STATS_BYTES_RD_READ] / toMB / timings[TIMINGS_RD_TOTAL],
            1.0 * stats[STATS_BYTES_RD_READ] / toMiB / timings[TIMINGS_RD_TOTAL],
            communicators->ionode_number,
            options->numfiles);

    sprintf(cbuffer2,
            "timings[%06d] T_PHASE %s %s open=%.5fs, read=%.5fs, close=%.5fs, tlog=%.4fs\n",
            communicators->all_rank,set,"READ",
            timings[TIMINGS_RD_OPEN],timings[TIMINGS_RD_READ],timings[TIMINGS_RD_CLOSE],timings[TIMINGS_MSGS]);

    sprintf(cbuffer3,
            "timings[%06d] T_FILE_BARRIER %s %s open=%.4fs, read=%.4fs, close=%.4fs\n",
            communicators->all_rank,set,"READ",
            timings[TIMINGS_RD_OPEN_BARR_FILE],timings[TIMINGS_RD_READ_BARR_FILE],timings[TIMINGS_RD_CLOSE_BARR_FILE]);

    sprintf(cbuffer4,
            "timings[%06d] T_GLOBAL_BARRIER %s %s open=%.4fs, read=%.4fs, close=%.4fs\n",
            communicators->all_rank,set,"READ",
            timings[TIMINGS_RD_OPEN_BARR_FILE],timings[TIMINGS_RD_READ_BARR_FILE],timings[TIMINGS_RD_CLOSE_BARR_FILE]);
  }

  if(collective) {
    collective_print_gather(cbuffer1, communicators->work);
    collective_print_gather(cbuffer2, communicators->work);
    collective_print_gather(cbuffer3, communicators->work);
    collective_print_gather(cbuffer4, communicators->work);
  } else {
    fprintf(stderr,"%s", cbuffer1);
    fprintf(stderr,"%s", cbuffer2);
    fprintf(stderr,"%s", cbuffer3);
    fprintf(stderr,"%s", cbuffer4);
  }

  return(1);
}

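/* Gather one fixed-size (MAXCHARLEN) message per task on rank 0 via
   MPI_Gather and print the messages in rank order. If the aggregated buffer
   would exceed 2 MiB, fall back to the send/receive based collective_print()
   below. */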
int collective_print_gather(char *cbuffer, MPI_Comm comm)
{
  int rank, size, p;
  char *lbuffer;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);

  if(size*MAXCHARLEN > 2*1024*1024) {
    return(collective_print(cbuffer,comm));
  }

  if(rank==0) {
    lbuffer = (char *) malloc(MAXCHARLEN * size);
    if(!lbuffer) {
      fprintf(stderr,"could not allocate buffer of size %d\n",MAXCHARLEN * size);
      MPI_Abort(comm,1);
    }
  }
  else lbuffer = NULL;

  MPI_Gather(cbuffer, MAXCHARLEN, MPI_CHAR, lbuffer, MAXCHARLEN, MPI_CHAR, 0, comm);

  if (rank == 0) {
    for (p = 0; p < size; p++) {
      fprintf(stderr, "%s", lbuffer + p * MAXCHARLEN);
    }
  }

  if(rank==0) free(lbuffer);

  return (1);
}

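/* Print one message per task in rank order without a collective gather:
   rank 0 prints its own buffer, then wakes up each remaining rank with a
   WAKEUP token and receives and prints that rank's buffer (COLPRINT tag). */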
int collective_print(char *cbuffer, MPI_Comm comm)
{
  int rank, size, p;
  int dummy = 0;
  char lbuffer[MAXCHARLEN];
  MPI_Status status;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  if (rank == 0) {
    fprintf(stderr, "%s", cbuffer);

    for (p = 1; p < size; p++) {
      MPI_Send(&dummy, 1, MPI_INT, p, WAKEUP, comm);
      MPI_Recv(lbuffer, MAXCHARLEN, MPI_CHAR, p, COLPRINT, comm, &status);
      if (strlen(lbuffer) > 0)
        fprintf(stderr, "%s", lbuffer);
    }
  }
  else {
    MPI_Recv(&dummy, 1, MPI_INT, 0, WAKEUP, comm, &status);
    MPI_Send(cbuffer, MAXCHARLEN, MPI_CHAR, 0, COLPRINT, comm);
  }
  return (1);
}