optimize_hopspack.cpp
// Copyright 2019, University of Maryland and the MANGO development team.
//
// This file is part of MANGO.
//
// MANGO is free software: you can redistribute it and/or modify it
// under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// MANGO is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with MANGO. If not, see
// <https://www.gnu.org/licenses/>.

#include <iostream>
#include <iomanip>
#include <stdexcept>
#include <cmath>
#include "mango.hpp"
#include "Solver.hpp"
#include "Package_hopspack.hpp"

#ifdef MANGO_HOPSPACK_AVAILABLE
#include "HOPSPACK_GenProcComm.hpp"
#include "HOPSPACK_ParameterList.hpp"
#include "HOPSPACK_Vector.hpp"
#include "HOPSPACK_ExecutorSerial.hpp"
#include "HOPSPACK_Hopspack.hpp"
#include "HOPSPACK_MangoEvaluator.hpp"

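// Forward declarations of the MPI driver routines used below; they follow the
// master/worker structure of HOPSPACK's MPI main program (HOPSPACK_main_mpi.cpp).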
int HOPSPACK_startProcComm(HOPSPACK::GenProcComm &, MPI_Comm);
int HOPSPACK_behaveAsMaster(HOPSPACK::GenProcComm &, HOPSPACK::ParameterList*, mango::Solver*);
int HOPSPACK_behaveAsWorker(const int, HOPSPACK::GenProcComm &, mango::Solver*);
#endif


void mango::Package_hopspack::optimize(Solver* solver) {
#ifdef MANGO_HOPSPACK_AVAILABLE

  // Shorthand:
  MPI_Comm mpi_comm_group_leaders = solver->mpi_partition->get_comm_group_leaders();
  int mpi_rank_group_leaders = solver->mpi_partition->get_rank_group_leaders();
  bool proc0 = (mpi_rank_group_leaders == 0);
  int N_parameters = solver->N_parameters;
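  // HOPSPACK sees only MANGO's group leaders: rank 0 of the group-leaders communicator
  // coordinates the search, and (when there is more than one worker group) the remaining
  // group leaders evaluate trial points.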

  if (solver->verbose > 0) std::cout << "Entering optimize_hopspack" << std::endl;

  // if (mpi_partition.get_N_procs_group_leaders() < 2) throw std::runtime_error("Hopspack requires at least 2 worker groups");

  // Initialize HOPSPACK's MPI machinery. This code is based on code in HOPSPACK_main_mpi.cpp.
  using HOPSPACK::GenProcComm;
  GenProcComm & cGPC = GenProcComm::getTheInstance();
  int nProcRank = HOPSPACK_startProcComm(cGPC, solver->mpi_partition->get_comm_group_leaders());
  if (nProcRank == -1) throw std::runtime_error("Error starting MPI in HOPSPACK");

  if (solver->verbose > 0) std::cout << "Done initializing HOPSPACK MPI variables." << std::endl;

  // Set HOPSPACK's parameters:
  // See the HOPSPACK manual for the parameter sublists and parameters.
  using HOPSPACK::ParameterList;
  ParameterList hopspack_parameters;
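  // Only rank 0 of the group-leaders communicator builds the parameter list: it acts as the
  // HOPSPACK mediator/master, while in the MPI case the other group leaders only respond to
  // evaluation requests.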
  if (proc0) {
    HOPSPACK::ParameterList* cProblemParams = &(hopspack_parameters.getOrSetSublist ("Problem Definition"));
    cProblemParams->setParameter("Objective Type","Minimize");
    cProblemParams->setParameter("Number Unknowns", N_parameters);

    // If bound constraints are available, hopefully hopspack can automatically set the scaling. Otherwise just try a uniform scaling.
    if (!solver->bound_constraints_set) {
      HOPSPACK::Vector scaling(N_parameters, 1.0);
      cProblemParams->setParameter("Scaling",scaling);
    }

    // Set the initial condition:
    HOPSPACK::Vector x0(N_parameters, 0.0);
    for (int j=0; j<N_parameters; j++) x0[j] = solver->state_vector[j];
    cProblemParams->setParameter("Initial X",x0);

    // Set bound constraints, if they are available:
    if (solver->bound_constraints_set) {
      HOPSPACK::Vector hopspack_lower_bounds(N_parameters, 0.0);
      HOPSPACK::Vector hopspack_upper_bounds(N_parameters, 0.0);
      // Eventually I may want to replace large values with HOPSPACK::dne() ?
      for (int j=0; j<N_parameters; j++) hopspack_lower_bounds[j] = solver->lower_bounds[j];
      for (int j=0; j<N_parameters; j++) hopspack_upper_bounds[j] = solver->upper_bounds[j];
      cProblemParams->setParameter("Lower Bounds",hopspack_lower_bounds);
      cProblemParams->setParameter("Upper Bounds",hopspack_upper_bounds);
    }

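    // The "Mediator" sublist controls HOPSPACK's coordinator: how many MPI ranks it may use
    // for evaluations, how many citizens (search algorithms) it runs, and the evaluation budget.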
    HOPSPACK::ParameterList* cMedParams = &(hopspack_parameters.getOrSetSublist ("Mediator"));
    //cout << "MJL Number Processors:" << cMedParams->getParameter("Number Processors",-17) << endl;
    cMedParams->setParameter("Number Processors",solver->mpi_partition->get_N_procs_group_leaders());
    cMedParams->setParameter("Citizen Count",1);
    cMedParams->setParameter("Maximum Evaluations",solver->max_function_evaluations);

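    // The single citizen is HOPSPACK's GSS (Generating Set Search) algorithm, an asynchronous
    // pattern search; it stops once its step length falls below "Step Tolerance".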
    HOPSPACK::ParameterList* cCitizenParams = &(hopspack_parameters.getOrSetSublist ("Citizen 1"));
    cCitizenParams->setParameter("Type","GSS");
    cCitizenParams->setParameter("Step Tolerance",1e-5);

    if (solver->verbose > 0) {
      cProblemParams->setParameter("Display",2);
      cMedParams->setParameter("Display",2);
      cCitizenParams->setParameter("Display",1);
    } else {
      cProblemParams->setParameter("Display",0);
      cMedParams->setParameter("Display",0);
      cCitizenParams->setParameter("Display",0);
    }

    if (solver->verbose > 0) std::cout << "Done setting HOPSPACK parameters." << std::endl;
  }

  // Run HOPSPACK
  int nReturnValue;
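  // With a single worker group there are no separate ranks available to act as HOPSPACK workers,
  // so run the single-process version; otherwise rank 0 behaves as the HOPSPACK master and every
  // other group leader behaves as a worker.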
  if (solver->mpi_partition->get_N_worker_groups()==1) {
    // Run serial version of hopspack.
    // The code for this is based on HOPSPACK_main_serial.cpp.
    if (solver->verbose > 0) std::cout << "Beginning serial version of HOPSPACK." << std::endl;
    //HOPSPACK::MangoEvaluator* pEvaluator = HOPSPACK::MangoEvaluator::newInstance (hopspack_parameters.sublist ("Evaluator"));
    HOPSPACK::MangoEvaluator* pEvaluator = new HOPSPACK::MangoEvaluator (hopspack_parameters, solver);
    if (pEvaluator == NULL) throw std::runtime_error("Error constructing MangoEvaluator for serial HOPSPACK!");
    HOPSPACK::ExecutorSerial* pExecutor = new HOPSPACK::ExecutorSerial (pEvaluator);
    HOPSPACK::Hopspack optimizer (pExecutor);
    if (optimizer.setInputParameters (hopspack_parameters, solver) == true) optimizer.solve();
    delete pEvaluator;
    delete pExecutor;
    nReturnValue = 0;

  } else {
    // Run MPI version of hopspack
    if (proc0) {
      if (solver->verbose > 0) std::cout << "Beginning MPI version of HOPSPACK." << std::endl;
      nReturnValue = HOPSPACK_behaveAsMaster(cGPC, &hopspack_parameters, solver);
    } else {
      nReturnValue = HOPSPACK_behaveAsWorker(nProcRank, cGPC, solver);
    }
  }

  if (nReturnValue != 0) throw std::runtime_error("Error! HOPSPACK returned an error code.");

  /* 20200131 I'll think about what to do with this next bit of code later.
  // If we ran the MPI version of hopspack, here we get the data from the optimum transferred to proc 0.
  int proc_index, best_proc_index;
  double candidate_best_objective_function, apparent_best_objective_function;
  bool was_I_best;
  if (solver->mpi_partition->get_N_worker_groups() > 1) {
    if (proc0) {
      // Poll all other procs for their best objective function
      for (proc_index = 1; proc_index < solver->mpi_partition->get_N_procs_group_leaders(); proc_index++) {
        MPI_Recv(&candidate_best_objective_function, 1, MPI_DOUBLE, proc_index, proc_index, mpi_comm_group_leaders, MPI_STATUS_IGNORE);
        //std::cout << "Best objective function on group leader " << proc_index << ": " << std::scientific << candidate_best_objective_function << std::endl;
        if (proc_index==1 || (candidate_best_objective_function < apparent_best_objective_function)) {
          apparent_best_objective_function = candidate_best_objective_function;
          best_proc_index = proc_index;
        }
      }
      //std::cout << "Best_proc_index: " << best_proc_index << std::endl;

      // Verify apparent_best_objective_function here coincides with the best_objective_function we found in write_hopspack_line_to_file:
      if (std::fabs(apparent_best_objective_function - best_objective_function) > 1e-30) {
        std::cerr << "WARNING!!! Significant difference between apparent_best_objective_function and best_objective_function." << std::endl;
        std::cerr << "apparent_best_objective_function: " << std::scientific << apparent_best_objective_function << " best_objective_function: " << best_objective_function
                  << " diff: " << apparent_best_objective_function - best_objective_function << std::endl;
      }

      // Tell each proc whether they were the one that achieved the best objective function
      MPI_Bcast(&best_proc_index, 1, MPI_INT, 0, mpi_comm_group_leaders);

      // Receive details of the optimum from the appropriate process:
      MPI_Recv(best_state_vector, N_parameters, MPI_DOUBLE, best_proc_index, best_proc_index, mpi_comm_group_leaders, MPI_STATUS_IGNORE);
      if (least_squares) MPI_Recv(best_residual_function, N_terms, MPI_DOUBLE, best_proc_index, best_proc_index, mpi_comm_group_leaders, MPI_STATUS_IGNORE);

    } else {
      // Handle procs other than 0.

      // Tell proc0 the best objective function achieved on this proc.
      MPI_Send(&best_objective_function, 1, MPI_DOUBLE, 0, mpi_rank_group_leaders, mpi_comm_group_leaders);
      // Hear from proc0 which proc achieved the best objective function:
      MPI_Bcast(&best_proc_index, 1, MPI_INT, 0, mpi_comm_group_leaders);
      // If it was me, then send details of the optimum
      if (mpi_rank_group_leaders == best_proc_index) {
        MPI_Send(best_state_vector, N_parameters, MPI_DOUBLE, 0, best_proc_index, mpi_comm_group_leaders);
        if (least_squares) MPI_Send(best_residual_function, N_terms, MPI_DOUBLE, 0, best_proc_index, mpi_comm_group_leaders);
      }
    }
  }
  // Done transferring data about the optimum to proc 0.
  */

#else
  // MANGO_HOPSPACK_AVAILABLE is not defined
  throw std::runtime_error("Error! The HOPSPACK algorithm was requested, but Mango was compiled without HOPSPACK support.");
#endif
}

/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////

/*
void mango::problem::write_hopspack_line_to_file(std::string line, double objective_function) {
  // This subroutine is only ever called on proc0_world.
  // 'line' is almost all of the line of the output file, except that the global # of function evaluations (the first element of the line)
  // is missing. This is because the line was generated on a proc >0, but only proc 0 knows the global # of function evaluations.

  clock_t now = clock();

  // In the case N_worker_groups=1 (i.e. a serial hopspack run), function_evaluations has already been incremented on this proc (0)
  // in objective_function_wrapper() or residual_function_wrapper().
  if (mpi_partition.get_N_worker_groups() > 1) function_evaluations += 1; // This line is how proc 0 keeps track of the total number of function evaluations.

  //std::cout << "write_hopspack_line_to_file: function_evaluations is now " << function_evaluations << ", now=" << now << ", f=" << std::scientific << objective_function;

  // This next line is how proc0_world keeps track of which function evaluation was the optimum.
  if (function_evaluations == 1 || objective_function < best_objective_function) {
    best_function_evaluation = function_evaluations;
    best_objective_function = objective_function;
    best_time = now;
  }
  // In a serial hopspack run (i.e. N_worker_groups=1), best_time was already recorded in objective_function_wrapper() or residual_function_wrapper(),
  // and the 'if' block above did not execute, since objective_function is no longer < best_objective_function.
  // For consistency, then, we should use that time, not the time measured above in this subroutine.
  if (best_function_evaluation == function_evaluations) now = best_time;

  // Now actually write the line of the file.
  write_function_evaluation_and_time(now);
  output_file << line << std::endl << std::flush;
}
*/

/////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////

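// MANGO should never dispatch a least-squares problem directly to this package;
// if execution reaches this method, that indicates a bug, hence the exception below.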
void mango::Package_hopspack::optimize_least_squares(Least_squares_solver*) {
  throw std::runtime_error("Error! mango somehow got to Package_hopspack::optimize_least_squares. This should never happen.");
}