mpi_partition_tests.cpp
// Copyright 2019, University of Maryland and the MANGO development team.
//
// This file is part of MANGO.
//
// MANGO is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// MANGO is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with MANGO. If not, see
// <https://www.gnu.org/licenses/>.

#include "catch.hpp"
#include "mango.hpp"

// Should I be using 'Generators' instead of putting CHECKs inside loops?
// https://www.reddit.com/r/cpp/comments/a6bdee/just_found_catch2_c_unit_test_framework_supports/
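// For reference, the two idioms look like this (Catch2 v2 syntax; 'f' is a
// hypothetical predicate used only for illustration):
//
//   auto j = GENERATE(range(0, 5)); // the TEST_CASE body is re-entered once per value
//   CHECK(f(j));
//
// versus
//
//   for (int j = 0; j < 5; j++) {
//     CAPTURE(j); // so a failure report shows which j failed
//     CHECK(f(j));
//   }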

/*
TEST_CASE("MPI_Partition.set_custom(): Verify that an exception is thrown if the input communicators supplied do not make sense.","[mpi_partition]") {
  mango::MPI_Partition mp;
  CHECK_THROWS(mp.set_custom(MPI_COMM_WORLD, MPI_COMM_WORLD, MPI_COMM_WORLD));
}
*/


TEST_CASE("MPI_Partition: Verify that calling getters before initialization causes exceptions","[mpi_partition]") {
  mango::MPI_Partition mp;
  CHECK_THROWS(mp.get_comm_world());
  CHECK_THROWS(mp.get_comm_worker_groups());
  CHECK_THROWS(mp.get_comm_group_leaders());

  CHECK_THROWS(mp.get_proc0_world());
  CHECK_THROWS(mp.get_proc0_worker_groups());

  CHECK_THROWS(mp.get_rank_world());
  CHECK_THROWS(mp.get_rank_worker_groups());
  CHECK_THROWS(mp.get_rank_group_leaders());

  CHECK_THROWS(mp.get_N_procs_world());
  CHECK_THROWS(mp.get_N_procs_worker_groups());
  CHECK_THROWS(mp.get_N_procs_group_leaders());
}
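
// A minimal sketch of the guard presumably behind each getter ('initialized' and
// 'comm_world' are hypothetical member names; MANGO's actual implementation may differ):
//
//   MPI_Comm MPI_Partition::get_comm_world() {
//     if (!initialized) throw std::runtime_error(
//       "MPI_Partition: getter called before init() or set_custom().");
//     return comm_world;
//   }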


TEST_CASE("MPI_Partition.init(): Verify that all the properties make sense when N_worker_groups=1.","[mpi_partition]") {
  int rank_world, N_procs_world;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
  MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);

  mango::MPI_Partition mp;
  mp.set_N_worker_groups(1); // There is only 1 worker group, so there is only 1 group leader.
  mp.init(MPI_COMM_WORLD);

  CHECK(mp.get_N_worker_groups() == 1);

  CHECK(mp.get_rank_world() == rank_world);
  CHECK(mp.get_rank_worker_groups() == rank_world);
  CHECK(mp.get_rank_group_leaders() == (rank_world==0 ? 0 : -1));

  CHECK(mp.get_N_procs_world() == N_procs_world);
  CHECK(mp.get_N_procs_worker_groups() == N_procs_world);
  CHECK(mp.get_N_procs_group_leaders() == (rank_world==0 ? 1 : -1));

  CHECK(mp.get_proc0_world() == (rank_world==0));
  CHECK(mp.get_proc0_worker_groups() == (rank_world==0));
}
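
// For intuition, here is a minimal sketch (an illustration; not necessarily MANGO's
// actual implementation) of how a partition like the one checked above can be built
// with MPI_Comm_split, assuming worker groups are contiguous blocks of ranks:
//
//   int worker_group = (rank_world * N_worker_groups) / N_procs_world;
//   MPI_Comm comm_worker_groups, comm_group_leaders;
//   MPI_Comm_split(MPI_COMM_WORLD, worker_group, rank_world, &comm_worker_groups);
//   int rank_worker_groups;
//   MPI_Comm_rank(comm_worker_groups, &rank_worker_groups);
//   // Only rank 0 of each worker group joins the group-leaders communicator;
//   // all other procs pass MPI_UNDEFINED and receive MPI_COMM_NULL.
//   int color = (rank_worker_groups == 0) ? 0 : MPI_UNDEFINED;
//   MPI_Comm_split(MPI_COMM_WORLD, color, rank_world, &comm_group_leaders);
//
// This block assignment also makes the group sizes differ by at most 1, which is
// the "relatively even" property checked in a later test.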


TEST_CASE("MPI_Partition.init(): Verify that all the properties make sense when N_worker_groups >= N_procs_world, or when N_worker_groups <= 0.","[mpi_partition]") {
  // When N_worker_groups <= 0, MPI_Partition.init() should set N_worker_groups equal to the number of available processors.

  int rank_world, N_procs_world;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
  MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);

  //for (int mode = 0; mode < 2; mode++) {
  //for (int shift = 0; shift < 5; shift++) {
  auto mode = GENERATE(range(0,2)); // so mode = 0 or 1.
  auto shift = GENERATE(range(0,5)); // so shift = 0, 1, 2, 3, or 4.
  int N_worker_groups_requested;
  if (mode==0) {
    N_worker_groups_requested = N_procs_world + shift;
  } else {
    N_worker_groups_requested = -shift; // so N_worker_groups_requested <= 0.
  }
  mango::MPI_Partition mp;
  mp.set_N_worker_groups(N_worker_groups_requested);
  mp.init(MPI_COMM_WORLD);

  CAPTURE(rank_world, N_worker_groups_requested, mode, shift);

  CHECK(mp.get_N_worker_groups() == N_procs_world);

  CHECK(mp.get_rank_world() == rank_world);
  CHECK(mp.get_rank_worker_groups() == 0);
  CHECK(mp.get_rank_group_leaders() == rank_world);

  CHECK(mp.get_N_procs_world() == N_procs_world);
  CHECK(mp.get_N_procs_worker_groups() == 1);
  CHECK(mp.get_N_procs_group_leaders() == N_procs_world);

  CHECK(mp.get_proc0_world() == (rank_world==0));
  CHECK(mp.get_proc0_worker_groups());
  //}
  //}
}
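
// Note on the two GENERATE calls above: Catch2 re-enters the TEST_CASE once per
// generated value, and multiple GENERATEs nest, so the body runs for all
// 2 x 5 = 10 (mode, shift) combinations -- the same coverage as the commented-out
// nested for loops.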


TEST_CASE("MPI_Partition.init(): Verify that for any choice of N_worker_groups, parameters are within the expected ranges.","[mpi_partition]") {
  int rank_world, N_procs_world;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
  MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);

  // Try values of N_worker_groups somewhat outside the range [1, N_procs_world] too, just to make sure these cases are handled correctly:
  for (int N_worker_groups_requested = -1; N_worker_groups_requested <= N_procs_world + 2; N_worker_groups_requested++) {
    mango::MPI_Partition mp;
    mp.set_N_worker_groups(N_worker_groups_requested);
    mp.init(MPI_COMM_WORLD);

    CAPTURE(rank_world, N_worker_groups_requested);

    CHECK(mp.get_N_worker_groups() >= 1);
    CHECK(mp.get_N_worker_groups() <= N_procs_world);

    CHECK(mp.get_rank_world() == rank_world);
    CHECK(mp.get_rank_worker_groups() >= 0);
    CHECK(mp.get_rank_worker_groups() < N_procs_world);

    CHECK(mp.get_N_procs_world() == N_procs_world);
    CHECK(mp.get_N_procs_worker_groups() >= 1);
    CHECK(mp.get_N_procs_worker_groups() <= N_procs_world);

    CHECK(mp.get_proc0_world() == (rank_world==0));

    bool proc0_worker_groups = mp.get_proc0_worker_groups();
    if (proc0_worker_groups) {
      CHECK(mp.get_rank_group_leaders() >= 0);
      CHECK(mp.get_rank_group_leaders() < N_procs_world);
      CHECK(mp.get_N_procs_group_leaders() >= 1);
      CHECK(mp.get_N_procs_group_leaders() <= N_procs_world);
    } else {
      CHECK(mp.get_rank_group_leaders() == -1);
      CHECK(mp.get_N_procs_group_leaders() == -1);
    }

    // The sizes of the worker groups should be relatively even, with a difference of no more than 1 between the largest and the smallest.
    int N_procs_worker_groups_max = mp.get_N_procs_worker_groups();
    int N_procs_worker_groups_min = mp.get_N_procs_worker_groups();
    if (rank_world==0) {
      // On the root, MPI_IN_PLACE means the send buffer is also the receive buffer.
      MPI_Reduce(MPI_IN_PLACE, &N_procs_worker_groups_max, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
      MPI_Reduce(MPI_IN_PLACE, &N_procs_worker_groups_min, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
      CHECK(N_procs_worker_groups_max - N_procs_worker_groups_min <= 1);
      CHECK(N_procs_worker_groups_max - N_procs_worker_groups_min >= 0);
    } else {
      // Non-root ranks only send values; their receive buffer is ignored, so NULL is fine.
      MPI_Reduce(&N_procs_worker_groups_max, NULL, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
      MPI_Reduce(&N_procs_worker_groups_min, NULL, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
    }
  }
}
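
// An equivalent check that every rank could evaluate, avoiding the root/non-root
// asymmetry above, would use MPI_Allreduce -- a sketch:
//
//   int n = mp.get_N_procs_worker_groups(), n_max, n_min;
//   MPI_Allreduce(&n, &n_max, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
//   MPI_Allreduce(&n, &n_min, 1, MPI_INT, MPI_MIN, MPI_COMM_WORLD);
//   CHECK(n_max - n_min <= 1);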


TEST_CASE("MPI_Partition.set_custom(): Verify that parameters are correct for the case in which every proc is a group leader.","[mpi_partition]") {
  int rank_world, N_procs_world;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
  MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);

  mango::MPI_Partition mp;
  // MPI_COMM_SELF as the worker-groups communicator makes each proc its own
  // worker group, so every proc is a group leader.
  mp.set_custom(MPI_COMM_WORLD, MPI_COMM_WORLD, MPI_COMM_SELF);

  CAPTURE(rank_world);
  CHECK(mp.get_N_worker_groups() == N_procs_world);

  CHECK(mp.get_rank_world() == rank_world);
  CHECK(mp.get_rank_worker_groups() == 0);
  CHECK(mp.get_rank_group_leaders() == rank_world);

  CHECK(mp.get_N_procs_world() == N_procs_world);
  CHECK(mp.get_N_procs_worker_groups() == 1);
  CHECK(mp.get_N_procs_group_leaders() == N_procs_world);

  CHECK(mp.get_proc0_world() == (rank_world==0));
  CHECK(mp.get_proc0_worker_groups());
}


TEST_CASE("MPI_Partition.set_custom(): Verify that parameters are correct for the case in which there is a single worker group.","[mpi_partition]") {
  int rank_world, N_procs_world;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
  MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);

  // Build a "group leaders" communicator containing only proc 0; every other
  // proc passes MPI_UNDEFINED and so receives MPI_COMM_NULL.
  int color;
  if (rank_world==0) {
    color = 0;
  } else {
    color = MPI_UNDEFINED;
  }
  MPI_Comm mpi_comm_proc0;
  MPI_Comm_split(MPI_COMM_WORLD, color, rank_world, &mpi_comm_proc0); // key = rank_world. The key doesn't really matter here.

  mango::MPI_Partition mp;
  mp.set_custom(MPI_COMM_WORLD, mpi_comm_proc0, MPI_COMM_WORLD);

  CAPTURE(rank_world);
  CHECK(mp.get_N_worker_groups() == 1);

  CHECK(mp.get_rank_world() == rank_world);
  CHECK(mp.get_rank_worker_groups() == rank_world);
  CHECK(mp.get_rank_group_leaders() == (rank_world==0 ? 0 : -1));

  CHECK(mp.get_N_procs_world() == N_procs_world);
  CHECK(mp.get_N_procs_worker_groups() == N_procs_world);
  CHECK(mp.get_N_procs_group_leaders() == (rank_world==0 ? 1 : -1));

  CHECK(mp.get_proc0_world() == (rank_world==0));
  CHECK(mp.get_proc0_worker_groups() == (rank_world==0));
}


TEST_CASE("MPI_Partition.set_custom(): Verify that for any choice of N_worker_groups, if we generate communicators using MPI_Partition.init() and supply them as inputs to set_custom(), the results of set_custom() are identical to init().","[mpi_partition]") {
  int rank_world, N_procs_world;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
  MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
  //CAPTURE(N_procs_world, rank_world);

  for (int N_worker_groups_requested = 1; N_worker_groups_requested <= N_procs_world; N_worker_groups_requested++) {
    //auto N_worker_groups_requested = GENERATE( range(1, 5) ); // Really we should only go up to N_procs_world rather than 5, but trying that with GENERATE_COPY in catch2 causes an abort signal.
    mango::MPI_Partition mp_init, mp_custom;
    mp_init.set_N_worker_groups(N_worker_groups_requested);
    mp_init.init(MPI_COMM_WORLD);
    mp_custom.set_custom(MPI_COMM_WORLD, mp_init.get_comm_group_leaders(), mp_init.get_comm_worker_groups());

    CAPTURE(rank_world);
    CHECK(mp_init.get_proc0_world() == mp_custom.get_proc0_world());
    CHECK(mp_init.get_proc0_worker_groups() == mp_custom.get_proc0_worker_groups());

    CHECK(mp_init.get_rank_world() == mp_custom.get_rank_world());
    CHECK(mp_init.get_rank_worker_groups() == mp_custom.get_rank_worker_groups());
    CHECK(mp_init.get_rank_group_leaders() == mp_custom.get_rank_group_leaders());

    CHECK(mp_init.get_N_procs_world() == mp_custom.get_N_procs_world());
    CHECK(mp_init.get_N_procs_worker_groups() == mp_custom.get_N_procs_worker_groups());
    CHECK(mp_init.get_N_procs_group_leaders() == mp_custom.get_N_procs_group_leaders());

    CHECK(mp_init.get_worker_group() == mp_custom.get_worker_group());
    CHECK(mp_init.get_N_worker_groups() == mp_custom.get_N_worker_groups());
    CHECK(mp_init.get_N_worker_groups() == N_worker_groups_requested);
  }
}


/*
TEST_CASE("minimal example") {
  int N;
  MPI_Comm_size(MPI_COMM_WORLD, &N);
  // int N=4;
  auto j = GENERATE_REF(range(1,N));
  CHECK(j > 0);
}
*/

/*
TEST_CASE("minimal example 2") {
  int N;
  MPI_Comm_size(MPI_COMM_WORLD, &N);
  for (int j = 1; j <= N; j++) {
    CHECK(j > 0);
  }
}
*/
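
// These tests assume MPI_Init has already been called (e.g. in a custom Catch2
// main) and only exercise multiple worker groups when the test binary is
// launched under MPI, e.g. (binary name hypothetical):
//
//   mpiexec -n 4 ./mango_unit_tests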