SIONlib  1.7.7
Scalable I/O library for parallel access to task-local files
sion_ompi_internal_gen.c
Go to the documentation of this file.
1 /****************************************************************************
2 ** SIONLIB http://www.fz-juelich.de/jsc/sionlib **
3 *****************************************************************************
4 ** Copyright (c) 2008-2019 **
5 ** Forschungszentrum Juelich, Juelich Supercomputing Centre **
6 ** **
7 ** See the file COPYRIGHT in the package base directory for details **
8 ****************************************************************************/
9 
14 #define _XOPEN_SOURCE 700
15 
16 #include <stdlib.h>
17 #include <stdio.h>
18 #include <stdarg.h>
19 #include <string.h>
20 #include <time.h>
21 
22 #include "mpi.h"
23 #include "omp.h"
24 
25 #include <sys/time.h>
26 
27 #include <sys/types.h>
28 #include <fcntl.h>
29 
30 #include <unistd.h>
31 
32 #include "sion.h"
33 #include "sion_debug.h"
34 #include "sion_error_handler.h"
35 #include "sion_internal.h"
36 #include "sion_fd.h"
37 #include "sion_filedesc.h"
38 #include "sion_printts.h"
39 
40 #include "sion_ompi_internal_gen.h"
41 
42 #ifdef SION_OMPI
43 
44 #include "sion_ompi.h"
45 
46 int _sion_gen_info_from_gcomm_ompi(int numFiles, MPI_Comm gComm, int *filenumber, int *lrank, int *lsize)
47 {
48  int gtasks, gRank;
49  int rc = SION_SUCCESS;
50  int task_per_file;
51 
52 
53  MPI_Comm_size(gComm, &gtasks);
54  MPI_Comm_rank(gComm, &gRank);
55  DPRINTFP((1, "_sion_get_info_from_splitted_comm_mpi", gRank, "enter: gcomm: %d of %d, numfiles=%d\n",
56  gRank,gtasks,numFiles));
57 
58  if (gtasks < numFiles) {
59  return(_sion_errorprint_ompi(SION_NOT_SUCCESS,_SION_ERROR_RETURN,
60  "_sion_gen_info_from_gcomm_mpi: Number of files(%d) cannot be bigger the the number of tasks(%d), aborting...\n",
61  numFiles, gtasks));
62  }
63 
64  task_per_file = gtasks / numFiles;
65 
66  /* remaining tasks are added to last communicator */
67  if (gRank >= (numFiles * task_per_file)) {
68  *filenumber = numFiles - 1;
69  }
70  else {
71  *filenumber = gRank / task_per_file;
72  }
73 
74  if(*filenumber == numFiles - 1) {
75  *lrank=gRank-(numFiles - 1)*task_per_file;
76  *lsize=gtasks-(numFiles - 1)*task_per_file;
77  } else {
78  *lrank=gRank%task_per_file;
79  *lsize=task_per_file;
80  }
81 
82  DPRINTFP((1, "_sion_get_info_from_splitted_comm_mpi", gRank, "Global communicator divided in %d local communicators (%d: %d of %d)\n",
83  numFiles,*filenumber,*lrank,*lsize));
84 
85  return (rc);
86 }
87 
/**
 * @brief Derive the number of sub-communicators and this task's
 *        communicator index from an already-split communicator pair.
 *
 * Rank 0 of gComm gathers every task's local rank and local size,
 * counts the sub-communicators (one per task with local rank 0), and
 * sends each sub-communicator leader its communicator index ("color").
 * Leaders then broadcast the color inside their local communicator.
 *
 * @param[in]  gComm      global MPI communicator
 * @param[in]  lComm      local (split) communicator, or MPI_COMM_NULL
 * @param[out] numComm    total number of local communicators found
 * @param[out] CommNumber index (color) of this task's communicator
 * @param[out] lrank      this task's rank inside lComm
 * @param[out] lsize      size of lComm
 *
 * @return SION_SUCCESS, or SION_NOT_SUCCESS on allocation failure
 */
int _sion_get_info_from_splitted_comm_ompi(MPI_Comm gComm, MPI_Comm lComm, int *numComm, int *CommNumber, int *lrank, int *lsize) {
  int gSize, gRank, lSize, lRank;
  int *allRanks=NULL, *allSizes=NULL, i, ntasks;
  int ncomms, color;
  int rc = SION_SUCCESS;
  MPI_Status status;

  MPI_Comm_size(gComm, &gSize);
  MPI_Comm_rank(gComm, &gRank);

  /* with no local communicator, treat the global one as the local one */
  if (lComm != MPI_COMM_NULL) {
    MPI_Comm_size(lComm, &lSize);
    MPI_Comm_rank(lComm, &lRank);
  }
  else {
    lSize = gSize;
    lRank = gRank;
  }

  DPRINTFP((32, "_sion_get_info_from_splitted_comm_mpi", gRank, "lRank: %d\n", lRank));

  /* only the root needs the gather buffers */
  if (gRank == 0) {

    allRanks = (int *) malloc(gSize * sizeof(int));
    allSizes = (int *) malloc(gSize * sizeof(int));
    if ((allRanks == NULL) || (allSizes == NULL)) {
      free(allRanks);
      free(allSizes);
      /* NOTE(review): returning here while the other ranks proceed to
         MPI_Gather below would leave them blocked — confirm whether the
         error handler aborts the job in this configuration */
      return(_sion_errorprint_ompi(SION_NOT_SUCCESS,_SION_ERROR_RETURN,"_sion_get_info_from_splitted_comm_mpi: Cannot allocate temp arrays of size %lu\n", (unsigned long) gSize * sizeof(int)));
    }
  }

  /* Gather all local ranks to task 0 for counting.. */
  MPI_Gather(&lRank, 1, MPI_INT, allRanks, 1, MPI_INT, 0, gComm);
  MPI_Gather(&lSize, 1, MPI_INT, allSizes, 1, MPI_INT, 0, gComm);

  ncomms = 0;
  ntasks = 0;
  if (gRank == 0) {
    /* every task reporting local rank 0 marks one sub-communicator */
    for (i = 0; i < gSize; i++) {
      if (allRanks[i] == 0) {
        /* Use the current number of the communicator for the file suffix!! */
        if (i != 0) {
          /* tag the message with the receiver's global rank */
          MPI_Send(&ncomms, 1, MPI_INT, i, i, gComm);
          DPRINTFP((32, "sion_paropen_comms_mpi", gRank, "Sent ncomms=%05d to %d with TAG %d\n", ncomms, i, i));
        }
        else {
          /* it's my self */
          color = ncomms;
        }
        ncomms++;
        ntasks += allSizes[i];
      }
    }
  }
  /* Recv the num of communicator from global 0 =>used for the suffix */
  if ((lRank == 0) && (gRank != 0)) {
    MPI_Recv(&color, 1, MPI_INT, 0, gRank, gComm, &status);
    DPRINTFP((32, "sion_paropen_comms_mpi", gRank, "Received ncomms=%05d from %d with TAG %d\n", color, status.MPI_SOURCE, status.MPI_TAG));

  }

  /* make totals known on every task */
  MPI_Bcast(&ncomms, 1, MPI_INT, 0, gComm);
  MPI_Bcast(&ntasks, 1, MPI_INT, 0, gComm);


  DPRINTFP((32, "_sion_get_info_from_splitted_comm_mpi", gRank, "#Comms=%05d #Tasks=%05d #TotalTasks=%05d\n", ncomms, ntasks, gSize));

  /* leader distributes the color within its local communicator.
     NOTE(review): if lComm is MPI_COMM_NULL on a task with gRank != 0,
     color is never assigned before being returned — verify callers
     only pass MPI_COMM_NULL on single-communicator setups */
  if (lComm != MPI_COMM_NULL) {
    MPI_Bcast(&color, 1, MPI_INT, 0, lComm);
  }

  if (gRank == 0) {
    if(allRanks) free(allRanks);
    if(allSizes) free(allSizes);
  }
  /* return parameters */
  *numComm = ncomms;
  *CommNumber = color;
  *lrank = lRank;
  *lsize = lSize;

  return (rc);
}
172 
173 int _sion_errorprint_ompi(int rc, int level, const char *format, ...)
174 {
175  int rank=-1;
176  va_list ap;
177 
178  int thread = omp_get_thread_num();
179  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
180 
181  va_start(ap, format);
182  rc=__sion_errorprint_vargs(rc, level, rank, thread, format, ap);
183  va_end(ap);
184 
185  return (rc);
186 }
187 
188 
189 /* Help Functions */
190 
/**
 * @brief Map a global OMPI rank to its owning MPI rank.
 *
 * Each MPI task hosts num_threads consecutive OMPI ranks, so integer
 * division collapses each group onto one MPI rank.
 *
 * @param[in] ompi_rank   combined MPI+OpenMP rank
 * @param[in] num_threads threads per MPI task
 *
 * @return MPI rank owning ompi_rank
 */
int _sion_map_rank_ompi_to_mpi(int ompi_rank, int num_threads) {
  return ompi_rank / num_threads;
}
198 
/**
 * @brief Extract the OpenMP thread number from a global OMPI rank.
 *
 * The thread number is the position of the rank within its MPI task's
 * group of num_threads consecutive OMPI ranks.
 *
 * @param[in] ompi_rank   combined MPI+OpenMP rank
 * @param[in] num_threads threads per MPI task
 *
 * @return thread number in [0, num_threads)
 */
int _sion_map_rank_ompi_to_thread_num(int ompi_rank, int num_threads) {
  return ompi_rank % num_threads;
}
206 
207 
/**
 * @brief Combine an MPI rank and an OpenMP thread number into the
 *        global OMPI rank.
 *
 * Inverse of _sion_map_rank_ompi_to_mpi / _sion_map_rank_ompi_to_thread_num.
 *
 * @param[in] mpi_rank    MPI task rank
 * @param[in] num_threads threads per MPI task
 * @param[in] thread_num  thread number within the task
 *
 * @return global OMPI rank
 */
int _sion_map_rank_mpi_to_ompi(int mpi_rank, int num_threads, int thread_num) {
  return mpi_rank * num_threads + thread_num;
}
215 
/**
 * @brief Total number of OMPI ranks for a given MPI size and thread count.
 *
 * @param[in] mpi_size    number of MPI tasks
 * @param[in] num_threads threads per MPI task
 *
 * @return mpi_size * num_threads
 */
int _sion_get_size_ompi(int mpi_size, int num_threads) {
  return mpi_size * num_threads;
}
223 
224 
225 /* end of ifdef OMPI */
226 #endif
Sion Time Stamp Header.