evaluate_set_in_parallel.cpp
// Copyright 2019, University of Maryland and the MANGO development team.
//
// This file is part of MANGO.
//
// MANGO is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// MANGO is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with MANGO. If not, see
// <https://www.gnu.org/licenses/>.

#include <iostream>
#include <iomanip>
#include <cstring>
#include <cmath>
#include <ctime>
#include "mpi.h"
#include "mango.hpp"
#include "Least_squares_solver.hpp"

void mango::Solver::evaluate_set_in_parallel(vector_function_type vector_function, int N_terms, int N_set, double* state_vectors, double* results, bool* failures) {

  // state_vectors should have been allocated with size N_parameters * N_set.
  // results should have been allocated with size N_terms * N_set.
  // failures should have been allocated with size N_set.
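  // For example, with N_parameters = 2 and N_set = 3, state_vectors holds
  // {x0[0], x0[1], x1[0], x1[1], x2[0], x2[1]}: the parameter index is the fastest-varying
  // dimension, matching the indexing &state_vectors[j_set*N_parameters] used below.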

  // All group leaders (but not workers) should call this subroutine.

  // To simplify code in this file, make some copies of variables.
  MPI_Comm mpi_comm_group_leaders = mpi_partition->get_comm_group_leaders();
  bool proc0_world = mpi_partition->get_proc0_world();
  int mpi_rank_world = mpi_partition->get_rank_world();
  int mpi_rank_group_leaders = mpi_partition->get_rank_group_leaders();
  int N_worker_groups = mpi_partition->get_N_worker_groups();

  int data;
  int j_set, j_parameter;

  if (verbose > 0) std::cout << "Hello from evaluate_set_in_parallel from proc " << mpi_rank_world << std::endl;

  /*
  if (proc0_world) {
    // Tell the group leaders to start this subroutine
    data = 1;
    MPI_Bcast(&data,1,MPI_INT,0,mpi_comm_group_leaders);
  }
  */

  // Make sure all procs agree on the input data.
  MPI_Bcast(&N_set, 1, MPI_INT, 0, mpi_comm_group_leaders);
  MPI_Bcast(&N_parameters, 1, MPI_INT, 0, mpi_comm_group_leaders);
  MPI_Bcast(state_vectors, N_set*N_parameters, MPI_DOUBLE, 0, mpi_comm_group_leaders);
  // We didn't actually need to send all the state vectors to all procs, only the subset
  // that a given proc will actually be responsible for. But the communication time is
  // usually negligible compared to evaluations of the objective function, and the approach
  // here has the benefit of simplicity.

  // Each proc now evaluates the user function for its share of the set.
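  // The set is distributed round-robin over the group leaders: for example, with
  // N_worker_groups = 3, group leader 0 evaluates j_set = 0, 3, 6, ..., leader 1 evaluates
  // j_set = 1, 4, 7, ..., and leader 2 evaluates j_set = 2, 5, 8, ....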
  int failed_int;
  for (j_set = 0; j_set < N_set; j_set++) {
    if ((j_set % N_worker_groups) == mpi_rank_group_leaders) {
      // Note that the use of &results[j_set*N_terms] in the next line means that the term
      // index must be the least-significant dimension in results.
      vector_function(&N_parameters, &state_vectors[j_set*N_parameters], &N_terms, &results[j_set*N_terms], &failed_int, problem, user_data);
      // TODO: record failed_int in the failures array here.
    }
  }

  // Send results back to the world master.
  // Make sure not to reduce over MPI_COMM_WORLD, since then the residual function values
  // will be multiplied by # of workers per worker group.
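  // MPI_IN_PLACE on the root means the root's own contribution is taken from "results" and
  // the reduced sum overwrites "results" in place; on the other group leaders the receive
  // buffer argument of MPI_Reduce is ignored, so only their send buffers matter.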
  if (proc0_world) {
    MPI_Reduce(MPI_IN_PLACE, results, N_set * N_terms, MPI_DOUBLE, MPI_SUM, 0, mpi_comm_group_leaders);
  } else {
    MPI_Reduce(results, results, N_set * N_terms, MPI_DOUBLE, MPI_SUM, 0, mpi_comm_group_leaders);
  }

  // Record the results in order in the output file. At the same time, check for any
  // best-yet values of the objective function.
  double total_objective_function;
  bool failed = false;
  clock_t now;
  if (proc0_world) {
    for (j_set = 0; j_set < N_set; j_set++) {
      //current_residuals = &residual_functions[j_set*N_terms];
      //total_objective_function = residuals_to_single_objective(current_residuals);
      record_function_evaluation_pointer(&state_vectors[j_set*N_parameters], &results[j_set*N_terms], failed);
    }
  }

}
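
Example usage (a minimal sketch, not part of the MANGO sources): the snippet below shows how a group leader might drive evaluate_set_in_parallel with a user-supplied residual function matching the mango::vector_function_type signature declared in mango.hpp. The names my_residuals and evaluate_five_points, the Solver pointer, and the specific residual values are hypothetical; within MANGO this routine is ordinarily called internally by the least-squares algorithms, so the sketch assumes the call is made from code with access to the Solver.

#include "mango.hpp"
#include "Least_squares_solver.hpp"

// Hypothetical residual function with the mango::vector_function_type signature.
void my_residuals(int* N_parameters, const double* state_vector, int* N_terms,
                  double* residuals, int* failed, mango::Problem* problem, void* user_data) {
  residuals[0] = state_vector[0] - 1.0;       // residual 0: illustrative only
  residuals[1] = 3.0 * state_vector[1];       // residual 1: illustrative only
  *failed = 0;                                // assuming 0 signals a successful evaluation
}

// Each group leader fills the buffers and makes the call; "solver" is assumed to be an
// initialized mango::Solver* for a problem with N_parameters = 2.
void evaluate_five_points(mango::Solver* solver) {
  const int N_parameters = 2, N_terms = 2, N_set = 5;
  double state_vectors[N_parameters * N_set];
  double results[N_terms * N_set] = {0.0};    // zero-filled: entries a leader does not
                                              // evaluate then contribute nothing to MPI_SUM
  bool failures[N_set] = {false};
  for (int j_set = 0; j_set < N_set; j_set++) {
    state_vectors[j_set * N_parameters + 0] = 0.1 * j_set;        // parameter 0 of point j_set
    state_vectors[j_set * N_parameters + 1] = 1.0 - 0.1 * j_set;  // parameter 1 of point j_set
  }
  solver->evaluate_set_in_parallel(my_residuals, N_terms, N_set, state_vectors, results, failures);
  // After the call, on proc0_world, results[j_set*N_terms + j_term] holds residual j_term
  // evaluated at state vector j_set.
}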