// Catch2 test: an MPI_Partition's getters must throw if called before
// init() or set_custom() has established the partition.
// NOTE(review): the TEST_CASE body (original file lines 34-51) is elided
// from this chunk -- the actual CHECK_THROWS assertions are not visible here.
33 TEST_CASE(
"MPI_Partition: Verify that calling getters before initialization causes exceptions",
"[mpi_partition]") {
// Catch2 test: MPI_Partition.init() with a single worker group should yield
// consistent partition properties.
// NOTE(review): partial extraction -- original lines 56-58 (presumably the
// declaration of `mp` and the request for N_worker_groups = 1) and the
// assertions following init() (lines 60-75) are elided from this chunk.
52 TEST_CASE(
"MPI_Partition.init(): Verify that all the properties make sense when N_worker_groups=1.",
"[mpi_partition]") {
53 int rank_world, N_procs_world;
// Query this process's rank and the total process count in the world comm.
54 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
55 MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
59 mp.
init(MPI_COMM_WORLD);
// Catch2 test: init() must cope gracefully with an out-of-range request:
// either too many worker groups (>= N_procs_world) or a non-positive count.
// NOTE(review): partial extraction -- the branch selecting between the two
// assignments below (presumably `if (mode == 0) ... else ...` on the elided
// original lines 88/90) and the assertions after CAPTURE are not visible.
76 TEST_CASE(
"MPI_Partition.init(): Verify that all the properties make sense when N_worker_groups=N_procs_world or more, or when N_worker_groups <= 0.",
"[mpi_partition]") {
79 int rank_world, N_procs_world;
// Query rank and size of the world communicator.
80 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
81 MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
// Catch2 generators: run this TEST_CASE for mode in {0,1} x shift in {0..4}.
85 auto mode = GENERATE(range(0,2));
86 auto shift = GENERATE(range(0,5));
87 int N_worker_groups_requested;
// Case: request at least as many groups as there are processes.
89 N_worker_groups_requested = N_procs_world + shift;
// Case: request a non-positive number of groups.
91 N_worker_groups_requested = -shift;
95 mp.
init(MPI_COMM_WORLD);
// Record generator state so Catch2 reports it on any failing CHECK.
97 CAPTURE(rank_world, N_worker_groups_requested, mode, shift);
// Catch2 test: for every N_worker_groups_requested from -1 through
// N_procs_world + 2, init() must produce parameters within expected ranges.
// NOTE(review): partial extraction -- several interior lines are elided,
// so `mp`, `proc0_worker_groups`, `N_procs_worker_groups_max/min`, and the
// `} else {` between the two reduction sections are not visible here.
116 TEST_CASE(
"MPI_Partition.init(): Verify that for any choice of N_worker_groups, parameters are within the expected ranges.",
"[mpi_partition]") {
117 int rank_world, N_procs_world;
// Query rank and size of the world communicator.
118 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
119 MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
// Sweep group counts from below the valid range to above it.
122 for (
int N_worker_groups_requested = -1; N_worker_groups_requested <= N_procs_world + 2; N_worker_groups_requested++) {
125 mp.
init(MPI_COMM_WORLD);
// Record loop state so Catch2 reports it on any failing CHECK.
127 CAPTURE(rank_world, N_worker_groups_requested);
// Root side of the reductions -- presumably proc0_worker_groups marks the
// root of the reduction (its definition is elided; TODO confirm).
143 if (proc0_worker_groups) {
// Root passes MPI_IN_PLACE: its local value participates in the reduction
// and the result lands back in the same buffer.
157 MPI_Reduce(MPI_IN_PLACE, &N_procs_worker_groups_max, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
158 MPI_Reduce(MPI_IN_PLACE, &N_procs_worker_groups_min, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
// Load balancing: worker-group sizes may differ by at most one process.
159 CHECK(N_procs_worker_groups_max - N_procs_worker_groups_min <= 1);
160 CHECK(N_procs_worker_groups_max - N_procs_worker_groups_min >= 0);
// Non-root side (the matching `} else {` is elided from this chunk):
// recvbuf is NULL because only rank 0 receives the reduced result.
162 MPI_Reduce(&N_procs_worker_groups_max, NULL, 1, MPI_INT, MPI_MAX, 0, MPI_COMM_WORLD);
163 MPI_Reduce(&N_procs_worker_groups_min, NULL, 1, MPI_INT, MPI_MIN, 0, MPI_COMM_WORLD);
// Catch2 test: set_custom() for the degenerate partition in which every
// process is its own group leader -- the group-leaders communicator is the
// whole world and each worker-group communicator is just MPI_COMM_SELF.
// NOTE(review): partial extraction -- the declaration of `mp` (original
// lines 173-174) and the assertions after set_custom() are elided.
169 TEST_CASE(
"MPI_Partition.set_custom(): Verify that parameters are correct for the case in which every proc is a group leader.",
"[mpi_partition]") {
170 int rank_world, N_procs_world;
// Query rank and size of the world communicator.
171 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
172 MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
175 mp.
set_custom(MPI_COMM_WORLD, MPI_COMM_WORLD, MPI_COMM_SELF);
// Catch2 test: set_custom() with exactly one worker group -- the
// group-leaders communicator contains only the root process and the
// worker-group communicator is the whole world.
// NOTE(review): partial extraction -- original lines 197-201 (presumably
// declaring `color` and assigning it a defined value on rank 0) and the
// assertions after set_custom() are elided from this chunk.
193 TEST_CASE(
"MPI_Partition.set_custom(): Verify that parameters are correct for the case in which there is a single worker group.",
"[mpi_partition]") {
194 int rank_world, N_procs_world;
// Query rank and size of the world communicator.
195 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
196 MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
// Processes passing MPI_UNDEFINED to MPI_Comm_split are excluded from the
// new communicator (they get MPI_COMM_NULL back).
202 color = MPI_UNDEFINED;
204 MPI_Comm mpi_comm_proc0;
// Split the world so that only processes with a defined color (presumably
// just rank 0 -- the assignment is elided) end up in mpi_comm_proc0.
205 MPI_Comm_split(MPI_COMM_WORLD, color, rank_world, &mpi_comm_proc0);
208 mp.
set_custom(MPI_COMM_WORLD, mpi_comm_proc0, MPI_COMM_WORLD);
// Catch2 test: round-trip consistency -- communicators produced by init()
// fed back into set_custom() must reproduce init()'s partition exactly,
// for every valid worker-group count.
// NOTE(review): this TEST_CASE continues past the end of the visible chunk;
// the set_custom() call and the comparison assertions are not shown here.
226 TEST_CASE(
"MPI_Partition.set_custom(): Verify that for any choice of N_worker_groups, if we generate communicators using MPI_Partition.init() and supply them as inputs to set_custom(), the results of set_custom() are identical to init().",
"[mpi_partition]") {
227 int rank_world, N_procs_world;
// Query rank and size of the world communicator.
228 MPI_Comm_rank(MPI_COMM_WORLD, &rank_world);
229 MPI_Comm_size(MPI_COMM_WORLD, &N_procs_world);
// Sweep all valid worker-group counts.
232 for (
int N_worker_groups_requested = 1; N_worker_groups_requested <= N_procs_world; N_worker_groups_requested++) {
236 mp_init.
init(MPI_COMM_WORLD);