// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/trust_region_minimizer.h"

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <string>
#include <vector>

#include "Eigen/Core"
#include "ceres/array_utils.h"
#include "ceres/evaluator.h"
#include "ceres/file.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/line_search.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/sparse_matrix.h"
#include "ceres/stringprintf.h"
#include "ceres/trust_region_strategy.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {

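// Perform an ARMIJO line search for a step size along the direction
// `delta`, starting at `x`, given the current cost and gradient. If the
// search along `delta` fails, the negative gradient is tried as a
// fallback search direction. Used below to safeguard the trust region
// step when the problem is bounds constrained.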
LineSearch::Summary DoLineSearch(const Minimizer::Options& options,
                                 const Vector& x,
                                 const Vector& gradient,
                                 const double cost,
                                 const Vector& delta,
                                 Evaluator* evaluator) {
  LineSearchFunction line_search_function(evaluator);

  LineSearch::Options line_search_options;
  line_search_options.is_silent = true;
  line_search_options.interpolation_type =
      options.line_search_interpolation_type;
  line_search_options.min_step_size = options.min_line_search_step_size;
  line_search_options.sufficient_decrease =
      options.line_search_sufficient_function_decrease;
  line_search_options.max_step_contraction =
      options.max_line_search_step_contraction;
  line_search_options.min_step_contraction =
      options.min_line_search_step_contraction;
  line_search_options.max_num_iterations =
      options.max_num_line_search_step_size_iterations;
  line_search_options.sufficient_curvature_decrease =
      options.line_search_sufficient_curvature_decrease;
  line_search_options.max_step_expansion =
      options.max_line_search_step_expansion;
  line_search_options.function = &line_search_function;

  std::string message;
  scoped_ptr<LineSearch>
      line_search(CHECK_NOTNULL(
                      LineSearch::Create(ceres::ARMIJO,
                                         line_search_options,
                                         &message)));
  LineSearch::Summary summary;
  line_search_function.Init(x, delta);
  // Try the trust region step.
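  // gradient.dot(delta) is the directional derivative of the cost along
  // the trust region step; the search starts from the full step
  // (step size 1.0).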
  line_search->Search(1.0, cost, gradient.dot(delta), &summary);
  if (!summary.success) {
    // If that was not successful, try the negative gradient as a
    // search direction.
    line_search_function.Init(x, -gradient);
    line_search->Search(1.0, cost, -gradient.squaredNorm(), &summary);
  }
  return summary;
}

}  // namespace

// Compute a scaling vector that is used to improve the conditioning
// of the Jacobian.
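// Each column is scaled by 1 / (1 + ||column||), so the scaling is
// well defined even for columns that are identically zero.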
void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian,
                                         double* scale) const {
  jacobian.SquaredColumnNorm(scale);
  for (int i = 0; i < jacobian.num_cols(); ++i) {
    scale[i] = 1.0 / (1.0 + sqrt(scale[i]));
  }
}

void TrustRegionMinimizer::Init(const Minimizer::Options& options) {
  options_ = options;
  std::sort(options_.trust_region_minimizer_iterations_to_dump.begin(),
            options_.trust_region_minimizer_iterations_to_dump.end());
}

void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
                                    double* parameters,
                                    Solver::Summary* summary) {
  double start_time = WallTimeInSeconds();
  double iteration_start_time = start_time;
  Init(options);

  Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
  SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
  TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);

  const bool is_not_silent = !options.is_silent;

  // If the problem is bounds constrained, then enable the use of a
  // line search after the trust region step has been computed. This
  // line search will automatically project the test point onto the
  // feasible set, thereby guaranteeing the feasibility of the final
  // output.
  //
  // TODO(sameeragarwal): Make line search available more generally.
  const bool use_line_search = options.is_constrained;

  summary->termination_type = NO_CONVERGENCE;
  summary->num_successful_steps = 0;
  summary->num_unsuccessful_steps = 0;

  const int num_parameters = evaluator->NumParameters();
  const int num_effective_parameters = evaluator->NumEffectiveParameters();
  const int num_residuals = evaluator->NumResiduals();

  Vector residuals(num_residuals);
  Vector trust_region_step(num_effective_parameters);
  Vector delta(num_effective_parameters);
  Vector x_plus_delta(num_parameters);
  Vector gradient(num_effective_parameters);
  Vector model_residuals(num_residuals);
  Vector scale(num_effective_parameters);
  Vector negative_gradient(num_effective_parameters);
  Vector projected_gradient_step(num_parameters);

  IterationSummary iteration_summary;
  iteration_summary.iteration = 0;
  iteration_summary.step_is_valid = false;
  iteration_summary.step_is_successful = false;
  iteration_summary.cost_change = 0.0;
  iteration_summary.gradient_max_norm = 0.0;
  iteration_summary.gradient_norm = 0.0;
  iteration_summary.step_norm = 0.0;
  iteration_summary.relative_decrease = 0.0;
  iteration_summary.trust_region_radius = strategy->Radius();
  iteration_summary.eta = options_.eta;
  iteration_summary.linear_solver_iterations = 0;
  iteration_summary.step_solver_time_in_seconds = 0;

  VectorRef x_min(parameters, num_parameters);
  Vector x = x_min;
  // Project onto the feasible set.
  if (options.is_constrained) {
    delta.setZero();
    if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
      summary->message =
          "Unable to project initial point onto the feasible set.";
      summary->termination_type = FAILURE;
      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
      return;
    }
    x_min = x_plus_delta;
    x = x_plus_delta;
  }

  double x_norm = x.norm();

  // Do initial cost and Jacobian evaluation.
  double cost = 0.0;
  if (!evaluator->Evaluate(x.data(),
                           &cost,
                           residuals.data(),
                           gradient.data(),
                           jacobian)) {
    summary->message = "Residual and Jacobian evaluation failed.";
    summary->termination_type = FAILURE;
    LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
    return;
  }

  negative_gradient = -gradient;
  if (!evaluator->Plus(x.data(),
                       negative_gradient.data(),
                       projected_gradient_step.data())) {
    summary->message = "Unable to compute gradient step.";
    summary->termination_type = FAILURE;
    LOG(ERROR) << "Terminating: " << summary->message;
    return;
  }

  summary->initial_cost = cost + summary->fixed_cost;
  iteration_summary.cost = cost + summary->fixed_cost;
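  // Convergence in the gradient is measured via the projected gradient
  // step x - Plus(x, -gradient); for an unconstrained problem with the
  // identity parameterization this reduces to the gradient itself.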
  iteration_summary.gradient_max_norm =
    (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
  iteration_summary.gradient_norm = (x - projected_gradient_step).norm();

  if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
    summary->message = StringPrintf("Gradient tolerance reached. "
                                    "Gradient max norm: %e <= %e",
                                    iteration_summary.gradient_max_norm,
                                    options_.gradient_tolerance);
    summary->termination_type = CONVERGENCE;
    VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
    return;
  }

  if (options_.jacobi_scaling) {
    EstimateScale(*jacobian, scale.data());
    jacobian->ScaleColumns(scale.data());
  } else {
    scale.setOnes();
  }

  iteration_summary.iteration_time_in_seconds =
      WallTimeInSeconds() - iteration_start_time;
  iteration_summary.cumulative_time_in_seconds =
      WallTimeInSeconds() - start_time
      + summary->preprocessor_time_in_seconds;
  summary->iterations.push_back(iteration_summary);

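  // Bookkeeping for the non-monotonic trust region algorithm: the
  // reference and candidate costs, together with the model cost
  // changes accumulated since they were set, are used below to judge
  // step quality when non-monotonic steps are enabled (see the
  // references in the comment block near the end of the loop).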
  int num_consecutive_nonmonotonic_steps = 0;
  double minimum_cost = cost;
  double reference_cost = cost;
  double accumulated_reference_model_cost_change = 0.0;
  double candidate_cost = cost;
  double accumulated_candidate_model_cost_change = 0.0;
  int num_consecutive_invalid_steps = 0;
  bool inner_iterations_are_enabled = options.inner_iteration_minimizer != NULL;
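  // Each pass through this loop computes a trust region step, decides
  // whether to accept or reject it, updates the trust region radius,
  // and checks the convergence and failure criteria.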
  while (true) {
    bool inner_iterations_were_useful = false;
    if (!RunCallbacks(options, iteration_summary, summary)) {
      return;
    }

    iteration_start_time = WallTimeInSeconds();
    if (iteration_summary.iteration >= options_.max_num_iterations) {
      summary->message = "Maximum number of iterations reached.";
      summary->termination_type = NO_CONVERGENCE;
      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
      return;
    }

    const double total_solver_time = iteration_start_time - start_time +
        summary->preprocessor_time_in_seconds;
    if (total_solver_time >= options_.max_solver_time_in_seconds) {
      summary->message = "Maximum solver time reached.";
      summary->termination_type = NO_CONVERGENCE;
      VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
      return;
    }

    const double strategy_start_time = WallTimeInSeconds();
    TrustRegionStrategy::PerSolveOptions per_solve_options;
    per_solve_options.eta = options_.eta;
    if (std::find(options_.trust_region_minimizer_iterations_to_dump.begin(),
                  options_.trust_region_minimizer_iterations_to_dump.end(),
                  iteration_summary.iteration) !=
        options_.trust_region_minimizer_iterations_to_dump.end()) {
      per_solve_options.dump_format_type =
          options_.trust_region_problem_dump_format_type;
      per_solve_options.dump_filename_base =
          JoinPath(options_.trust_region_problem_dump_directory,
                   StringPrintf("ceres_solver_iteration_%03d",
                                iteration_summary.iteration));
    } else {
      per_solve_options.dump_format_type = TEXTFILE;
      per_solve_options.dump_filename_base.clear();
    }

    TrustRegionStrategy::Summary strategy_summary =
        strategy->ComputeStep(per_solve_options,
                              jacobian,
                              residuals.data(),
                              trust_region_step.data());

    if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
      summary->message =
          "Linear solver failed due to unrecoverable "
          "non-numeric causes. Please see the error log for clues.";
      summary->termination_type = FAILURE;
      LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
      return;
    }

    iteration_summary = IterationSummary();
    iteration_summary.iteration = summary->iterations.back().iteration + 1;
    iteration_summary.step_solver_time_in_seconds =
        WallTimeInSeconds() - strategy_start_time;
    iteration_summary.linear_solver_iterations =
        strategy_summary.num_iterations;
    iteration_summary.step_is_valid = false;
    iteration_summary.step_is_successful = false;

    double model_cost_change = 0.0;
    if (strategy_summary.termination_type != LINEAR_SOLVER_FAILURE) {
      // new_model_cost
      //  = 1/2 [f + J * step]^2
      //  = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ]
      // model_cost_change
      //  = cost - new_model_cost
      //  = f'f/2  - 1/2 [ f'f + 2f'J * step + step' * J' * J * step]
      //  = -f'J * step - step' * J' * J * step / 2
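      // With m = J * step, the expression below evaluates to
      // -(f'm + m'm / 2), which is exactly the model_cost_change
      // derived above.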
      model_residuals.setZero();
      jacobian->RightMultiply(trust_region_step.data(), model_residuals.data());
      model_cost_change =
          -model_residuals.dot(residuals + model_residuals / 2.0);

      if (model_cost_change < 0.0) {
        VLOG_IF(1, is_not_silent)
            << "Invalid step: current_cost: " << cost
            << " absolute difference " << model_cost_change
            << " relative difference " << (model_cost_change / cost);
      } else {
        iteration_summary.step_is_valid = true;
      }
    }

    if (!iteration_summary.step_is_valid) {
      // Invalid steps can happen for a number of reasons. We allow a
      // limited number of consecutive failures, and return with
      // FAILURE if this limit is exceeded.
      if (++num_consecutive_invalid_steps >=
          options_.max_num_consecutive_invalid_steps) {
        summary->message = StringPrintf(
            "Number of consecutive invalid steps more "
            "than Solver::Options::max_num_consecutive_invalid_steps: %d",
            options_.max_num_consecutive_invalid_steps);
        summary->termination_type = FAILURE;
        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
        return;
      }

      // We are going to try and reduce the trust region radius and
      // solve again. To do this, we are going to treat this iteration
      // as an unsuccessful iteration. Since the various callbacks are
      // still executed, we are going to fill the iteration summary
      // with data that assumes a step of length zero and no progress.
      iteration_summary.cost = cost + summary->fixed_cost;
      iteration_summary.cost_change = 0.0;
      iteration_summary.gradient_max_norm =
          summary->iterations.back().gradient_max_norm;
      iteration_summary.gradient_norm =
          summary->iterations.back().gradient_norm;
      iteration_summary.step_norm = 0.0;
      iteration_summary.relative_decrease = 0.0;
      iteration_summary.eta = options_.eta;
    } else {
      // The step is numerically valid, so now we can judge its quality.
      num_consecutive_invalid_steps = 0;

      // Undo the Jacobian column scaling.
      delta = (trust_region_step.array() * scale.array()).matrix();

      // Try improving the step further by using an ARMIJO line
      // search.
      //
      // TODO(sameeragarwal): What happens to trust region sizing as
      // it interacts with the line search?
      if (use_line_search) {
        const LineSearch::Summary line_search_summary =
            DoLineSearch(options, x, gradient, cost, delta, evaluator);
        if (line_search_summary.success) {
          delta *= line_search_summary.optimal_step_size;
        }
      }

      double new_cost = std::numeric_limits<double>::max();
      if (evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
        if (!evaluator->Evaluate(x_plus_delta.data(),
                                 &new_cost,
                                 NULL,
                                 NULL,
                                 NULL)) {
          LOG(WARNING) << "Step failed to evaluate. "
                       << "Treating it as a step with infinite cost";
          new_cost = std::numeric_limits<double>::max();
        }
      } else {
        LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. "
                     << "Treating it as a step with infinite cost";
      }

      if (new_cost < std::numeric_limits<double>::max()) {
        // Check if performing an inner iteration will make it better.
        if (inner_iterations_are_enabled) {
          ++summary->num_inner_iteration_steps;
          double inner_iteration_start_time = WallTimeInSeconds();
          const double x_plus_delta_cost = new_cost;
          Vector inner_iteration_x = x_plus_delta;
          Solver::Summary inner_iteration_summary;
          options.inner_iteration_minimizer->Minimize(options,
                                                      inner_iteration_x.data(),
                                                      &inner_iteration_summary);
          if (!evaluator->Evaluate(inner_iteration_x.data(),
                                   &new_cost,
                                   NULL, NULL, NULL)) {
            VLOG_IF(2, is_not_silent) << "Inner iteration failed.";
            new_cost = x_plus_delta_cost;
          } else {
            x_plus_delta = inner_iteration_x;
            // Boost the model_cost_change, since the inner iteration
            // improvements are not accounted for by the trust region.
            model_cost_change += x_plus_delta_cost - new_cost;
            VLOG_IF(2, is_not_silent)
                << "Inner iteration succeeded; Current cost: " << cost
                << " Trust region step cost: " << x_plus_delta_cost
                << " Inner iteration cost: " << new_cost;

            inner_iterations_were_useful = new_cost < cost;

            const double inner_iteration_relative_progress =
                1.0 - new_cost / x_plus_delta_cost;
            // Disable inner iterations once the relative improvement
            // drops below tolerance.
            inner_iterations_are_enabled =
                (inner_iteration_relative_progress >
                 options.inner_iteration_tolerance);
            VLOG_IF(2, is_not_silent && !inner_iterations_are_enabled)
                << "Disabling inner iterations. Progress : "
                << inner_iteration_relative_progress;
          }
          summary->inner_iteration_time_in_seconds +=
              WallTimeInSeconds() - inner_iteration_start_time;
        }
      }

      iteration_summary.step_norm = (x - x_plus_delta).norm();

      // Convergence based on parameter_tolerance.
      const double step_size_tolerance = options_.parameter_tolerance *
          (x_norm + options_.parameter_tolerance);
      if (iteration_summary.step_norm <= step_size_tolerance) {
        summary->message =
            StringPrintf("Parameter tolerance reached. "
                         "Relative step_norm: %e <= %e.",
                         (iteration_summary.step_norm /
                          (x_norm + options_.parameter_tolerance)),
                         options_.parameter_tolerance);
        summary->termination_type = CONVERGENCE;
        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
        return;
      }

      iteration_summary.cost_change = cost - new_cost;
      const double absolute_function_tolerance =
          options_.function_tolerance * cost;
      if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
        summary->message =
            StringPrintf("Function tolerance reached. "
                         "|cost_change|/cost: %e <= %e",
                         fabs(iteration_summary.cost_change) / cost,
                         options_.function_tolerance);
        summary->termination_type = CONVERGENCE;
        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
        return;
      }

      const double relative_decrease =
          iteration_summary.cost_change / model_cost_change;

      const double historical_relative_decrease =
          (reference_cost - new_cost) /
          (accumulated_reference_model_cost_change + model_cost_change);

      // If monotonic steps are being used, then the relative_decrease
      // is the usual ratio of the change in objective function value
      // divided by the change in model cost.
      //
      // If non-monotonic steps are allowed, then we take the maximum
      // of the relative_decrease and the historical_relative_decrease,
      // which measures the decrease in cost since a reference
      // iteration. The model cost change is estimated by accumulating
      // the model cost changes since the reference iteration. The
      // historical relative_decrease offers a boost to a step which is
      // not too bad compared to the reference iteration, allowing for
      // non-monotonic steps.
      iteration_summary.relative_decrease =
          options.use_nonmonotonic_steps
          ? std::max(relative_decrease, historical_relative_decrease)
          : relative_decrease;

      // Normally, the quality of a trust region step is measured by
      // the ratio
      //
      //              cost_change
      //    r =    -----------------
      //           model_cost_change
      //
      // All the change in the nonlinear objective is due to the trust
      // region step, so this ratio is a good measure of the quality of
      // the trust region radius. However, when inner iterations are
      // being used, cost_change includes the contribution of the
      // inner iterations and it is not fair to credit it all to the
      // trust region algorithm. So we change the ratio to be
      //
      //                              cost_change
      //    r =    ------------------------------------------------
      //           (model_cost_change + inner_iteration_cost_change)
      //
      // In most cases this is fine, but it can be the case that the
      // change in solution quality due to inner iterations is so large
      // and the trust region step is so bad, that this ratio can become
      // quite small.
      //
      // This can cause the trust region loop to reject this step. To
      // get around this, we explicitly check if the inner iterations
      // led to a net decrease in the objective function value. If
      // they did, we accept the step even if the trust region ratio
      // is small.
      //
      // Notice that we do not just check that cost_change is positive,
      // which is a weaker condition and would render the
      // min_relative_decrease threshold useless. Instead, we keep
      // track of inner_iterations_were_useful, which is true only
      // when inner iterations lead to a net decrease in the cost.
      iteration_summary.step_is_successful =
          (inner_iterations_were_useful ||
           iteration_summary.relative_decrease >
           options_.min_relative_decrease);

      if (iteration_summary.step_is_successful) {
        accumulated_candidate_model_cost_change += model_cost_change;
        accumulated_reference_model_cost_change += model_cost_change;

        if (!inner_iterations_were_useful &&
            relative_decrease <= options_.min_relative_decrease) {
          iteration_summary.step_is_nonmonotonic = true;
          VLOG_IF(2, is_not_silent)
              << "Non-monotonic step! "
              << " relative_decrease: "
              << relative_decrease
              << " historical_relative_decrease: "
              << historical_relative_decrease;
        }
      }
    }

    if (iteration_summary.step_is_successful) {
      ++summary->num_successful_steps;
      strategy->StepAccepted(iteration_summary.relative_decrease);

      x = x_plus_delta;
      x_norm = x.norm();

      // Step looks good, evaluate the residuals and Jacobian at this
      // point.
      if (!evaluator->Evaluate(x.data(),
                               &cost,
                               residuals.data(),
                               gradient.data(),
                               jacobian)) {
        summary->message = "Residual and Jacobian evaluation failed.";
        summary->termination_type = FAILURE;
        LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
        return;
      }

      negative_gradient = -gradient;
      if (!evaluator->Plus(x.data(),
                           negative_gradient.data(),
                           projected_gradient_step.data())) {
        summary->message =
            "projected_gradient_step = Plus(x, -gradient) failed.";
        summary->termination_type = FAILURE;
        LOG(ERROR) << "Terminating: " << summary->message;
        return;
      }

      iteration_summary.gradient_max_norm =
        (x - projected_gradient_step).lpNorm<Eigen::Infinity>();
      iteration_summary.gradient_norm = (x - projected_gradient_step).norm();

      if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
        summary->message = StringPrintf("Gradient tolerance reached. "
                                        "Gradient max norm: %e <= %e",
                                        iteration_summary.gradient_max_norm,
                                        options_.gradient_tolerance);
        summary->termination_type = CONVERGENCE;
        VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
        return;
      }

      if (options_.jacobi_scaling) {
        jacobian->ScaleColumns(scale.data());
      }

      // Update the best, reference and candidate iterates.
      //
      // Based on algorithm 10.1.2 (page 357) of "Trust Region
      // Methods" by Conn, Gould & Toint, or equations 33-40 of
      // "Non-monotone trust-region algorithms for nonlinear
      // optimization subject to convex constraints" by Phil Toint,
      // Mathematical Programming, 77, 1997.
      if (cost < minimum_cost) {
        // A step that improves solution quality was found.
        x_min = x;
        minimum_cost = cost;
        // Set the candidate iterate to the current point.
        candidate_cost = cost;
        num_consecutive_nonmonotonic_steps = 0;
        accumulated_candidate_model_cost_change = 0.0;
      } else {
        ++num_consecutive_nonmonotonic_steps;
        if (cost > candidate_cost) {
          // The current iterate has a higher cost than the candidate
          // iterate. Set the candidate to this point.
          VLOG_IF(2, is_not_silent)
              << "Updating the candidate iterate to the current point.";
          candidate_cost = cost;
          accumulated_candidate_model_cost_change = 0.0;
        }

        // If we have made too many non-monotonic steps, reset the
        // reference iterate to force the algorithm to descend.
        //
        // This works because the candidate iterate has a cost greater
        // than minimum_cost but smaller than that of the reference
        // iterate.
        if (num_consecutive_nonmonotonic_steps ==
            options.max_consecutive_nonmonotonic_steps) {
          VLOG_IF(2, is_not_silent)
              << "Resetting the reference point to the candidate point";
          reference_cost = candidate_cost;
          accumulated_reference_model_cost_change =
              accumulated_candidate_model_cost_change;
        }
      }
    } else {
      ++summary->num_unsuccessful_steps;
      if (iteration_summary.step_is_valid) {
        strategy->StepRejected(iteration_summary.relative_decrease);
      } else {
        strategy->StepIsInvalid();
      }
    }

    iteration_summary.cost = cost + summary->fixed_cost;
    iteration_summary.trust_region_radius = strategy->Radius();
    if (iteration_summary.trust_region_radius <
        options_.min_trust_region_radius) {
      summary->message = "Termination. Minimum trust region radius reached.";
      summary->termination_type = CONVERGENCE;
      VLOG_IF(1, is_not_silent) << summary->message;
      return;
    }

    iteration_summary.iteration_time_in_seconds =
        WallTimeInSeconds() - iteration_start_time;
    iteration_summary.cumulative_time_in_seconds =
        WallTimeInSeconds() - start_time
        + summary->preprocessor_time_in_seconds;
    summary->iterations.push_back(iteration_summary);
  }
}


}  // namespace internal
}  // namespace ceres