// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/line_search.h"

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <map>
#include <memory>
#include <ostream>  // NOLINT
#include <string>
#include <vector>

#include "ceres/evaluator.h"
#include "ceres/function_sample.h"
#include "ceres/internal/eigen.h"
#include "ceres/map_util.h"
#include "ceres/polynomial.h"
#include "ceres/stringprintf.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"

namespace ceres::internal {

namespace {
// Precision used for floating point values in error message output.
const int kErrorMessageNumericPrecision = 8;
}  // namespace

std::ostream& operator<<(std::ostream& os, const FunctionSample& sample);

// Convenience stream operator for pushing FunctionSamples into log messages.
std::ostream& operator<<(std::ostream& os, const FunctionSample& sample) {
  os << sample.ToDebugString();
  return os;
}

LineSearch::~LineSearch() = default;

LineSearch::LineSearch(const LineSearch::Options& options)
    : options_(options) {}

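// Hedged usage sketch (comment only, not part of the implementation): given a
// LineSearch::Options struct populated by the caller (the exact fields live in
// line_search.h), a typical call sequence might look like:
//
//   std::string error;
//   std::unique_ptr<LineSearch> line_search =
//       LineSearch::Create(ceres::WOLFE, options, &error);
//   if (line_search == nullptr) { /* handle `error` */ }
//   LineSearch::Summary summary;
//   line_search->Search(step_size_estimate, initial_cost, initial_gradient,
//                       &summary);
//
// On success, summary.success is true and summary.optimal_point holds the
// accepted FunctionSample.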
std::unique_ptr<LineSearch> LineSearch::Create(
    const LineSearchType line_search_type,
    const LineSearch::Options& options,
    std::string* error) {
  switch (line_search_type) {
    case ceres::ARMIJO:
      return std::make_unique<ArmijoLineSearch>(options);
    case ceres::WOLFE:
      return std::make_unique<WolfeLineSearch>(options);
    default:
      *error = std::string("Invalid line search algorithm type: ") +
               LineSearchTypeToString(line_search_type) +
               std::string(", unable to create line search.");
  }
  return nullptr;
}

LineSearchFunction::LineSearchFunction(Evaluator* evaluator)
    : evaluator_(evaluator),
      position_(evaluator->NumParameters()),
      direction_(evaluator->NumEffectiveParameters()),
      scaled_direction_(evaluator->NumEffectiveParameters()),
      initial_evaluator_residual_time_in_seconds(0.0),
      initial_evaluator_jacobian_time_in_seconds(0.0) {}

void LineSearchFunction::Init(const Vector& position, const Vector& direction) {
  position_ = position;
  direction_ = direction;
}

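// Evaluates the univariate line search objective
//
//   phi(x) = f(Plus(position, x * direction)),
//
// and, when evaluate_gradient is true, the directional derivative of f along
// direction at that point, computed below as direction_.dot(vector_gradient).
// The validity flags on the output FunctionSample record how far the
// evaluation got before failing (if it failed).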
void LineSearchFunction::Evaluate(const double x,
                                  const bool evaluate_gradient,
                                  FunctionSample* output) {
  output->x = x;
  output->vector_x_is_valid = false;
  output->value_is_valid = false;
  output->gradient_is_valid = false;
  output->vector_gradient_is_valid = false;

  scaled_direction_ = output->x * direction_;
  output->vector_x.resize(position_.rows(), 1);
  if (!evaluator_->Plus(position_.data(),
                        scaled_direction_.data(),
                        output->vector_x.data())) {
    return;
  }
  output->vector_x_is_valid = true;

  double* gradient = nullptr;
  if (evaluate_gradient) {
    output->vector_gradient.resize(direction_.rows(), 1);
    gradient = output->vector_gradient.data();
  }
  const bool eval_status = evaluator_->Evaluate(
      output->vector_x.data(), &(output->value), nullptr, gradient, nullptr);

  if (!eval_status || !std::isfinite(output->value)) {
    return;
  }

  output->value_is_valid = true;
  if (!evaluate_gradient) {
    return;
  }

  output->gradient = direction_.dot(output->vector_gradient);
  if (!std::isfinite(output->gradient)) {
    return;
  }

  output->gradient_is_valid = true;
  output->vector_gradient_is_valid = true;
}

double LineSearchFunction::DirectionInfinityNorm() const {
  return direction_.lpNorm<Eigen::Infinity>();
}

void LineSearchFunction::ResetTimeStatistics() {
  const std::map<std::string, CallStatistics> evaluator_statistics =
      evaluator_->Statistics();

  initial_evaluator_residual_time_in_seconds =
      FindWithDefault(
          evaluator_statistics, "Evaluator::Residual", CallStatistics())
          .time;
  initial_evaluator_jacobian_time_in_seconds =
      FindWithDefault(
          evaluator_statistics, "Evaluator::Jacobian", CallStatistics())
          .time;
}

void LineSearchFunction::TimeStatistics(
    double* cost_evaluation_time_in_seconds,
    double* gradient_evaluation_time_in_seconds) const {
  const std::map<std::string, CallStatistics> evaluator_time_statistics =
      evaluator_->Statistics();
  *cost_evaluation_time_in_seconds =
      FindWithDefault(
          evaluator_time_statistics, "Evaluator::Residual", CallStatistics())
          .time -
      initial_evaluator_residual_time_in_seconds;
  // Strictly speaking this will slightly underestimate the time spent
  // evaluating the gradient of the line search univariate cost function as it
  // does not count the time spent performing the dot product with the direction
  // vector. However, this will typically be small by comparison, and also
  // allows direct subtraction of the timing information from the totals for
  // the evaluator returned in the solver summary.
  *gradient_evaluation_time_in_seconds =
      FindWithDefault(
          evaluator_time_statistics, "Evaluator::Jacobian", CallStatistics())
          .time -
      initial_evaluator_jacobian_time_in_seconds;
}

void LineSearch::Search(double step_size_estimate,
                        double initial_cost,
                        double initial_gradient,
                        Summary* summary) const {
  const double start_time = WallTimeInSeconds();
  CHECK(summary != nullptr);
  *summary = LineSearch::Summary();

  summary->cost_evaluation_time_in_seconds = 0.0;
  summary->gradient_evaluation_time_in_seconds = 0.0;
  summary->polynomial_minimization_time_in_seconds = 0.0;
  options().function->ResetTimeStatistics();
  this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary);
  options().function->TimeStatistics(
      &summary->cost_evaluation_time_in_seconds,
      &summary->gradient_evaluation_time_in_seconds);

  summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
}

// Returns step_size \in [min_step_size, max_step_size] which minimizes the
// polynomial of degree defined by interpolation_type which interpolates all
// of the provided samples with valid values.
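//
// As an illustrative sketch (not the exhaustive behaviour of
// MinimizeInterpolatingPolynomial): with QUADRATIC interpolation, a
// lower-bound sample at x = 0 with value f(0) and gradient f'(0), and a
// second sample (a, f(a)), the interpolating quadratic has its unconstrained
// minimizer at
//
//   step = -f'(0) * a^2 / (2 * (f(a) - f(0) - f'(0) * a)),
//
// and the value returned here is the minimizer of that polynomial restricted
// to [min_step_size, max_step_size].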
double LineSearch::InterpolatingPolynomialMinimizingStepSize(
    const LineSearchInterpolationType& interpolation_type,
    const FunctionSample& lowerbound,
    const FunctionSample& previous,
    const FunctionSample& current,
    const double min_step_size,
    const double max_step_size) const {
  if (!current.value_is_valid ||
      (interpolation_type == BISECTION && max_step_size <= current.x)) {
    // Either: sample is invalid; or we are using BISECTION and contracting
    // the step size.
    return std::min(std::max(current.x * 0.5, min_step_size), max_step_size);
  } else if (interpolation_type == BISECTION) {
    CHECK_GT(max_step_size, current.x);
    // We are expanding the search (during a Wolfe bracketing phase) using
    // BISECTION interpolation. Using BISECTION when trying to expand is
    // strictly speaking an oxymoron, but we define this to mean always taking
    // the maximum step size so that the Armijo & Wolfe implementations are
    // agnostic to the interpolation type.
    return max_step_size;
  }
  // Only check if lower-bound is valid here, where it is required
  // to avoid replicating current.value_is_valid == false
  // behaviour in WolfeLineSearch.
  CHECK(lowerbound.value_is_valid)
      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
      << "Ceres bug: lower-bound sample for interpolation is invalid, "
      << "please contact the developers!, interpolation_type: "
      << LineSearchInterpolationTypeToString(interpolation_type)
      << ", lowerbound: " << lowerbound << ", previous: " << previous
      << ", current: " << current;

  // Select step size by interpolating the function and gradient values
  // and minimizing the corresponding polynomial.
  std::vector<FunctionSample> samples;
  samples.push_back(lowerbound);

  if (interpolation_type == QUADRATIC) {
    // Two point interpolation using function values and the
    // gradient at the lower bound.
    samples.emplace_back(current.x, current.value);

    if (previous.value_is_valid) {
      // Three point interpolation, using function values and the
      // gradient at the lower bound.
      samples.emplace_back(previous.x, previous.value);
    }
  } else if (interpolation_type == CUBIC) {
    // Two point interpolation using the function values and the gradients.
    samples.push_back(current);

    if (previous.value_is_valid) {
      // Three point interpolation using the function values and
      // the gradients.
      samples.push_back(previous);
    }
  } else {
    LOG(FATAL) << "Ceres bug: No handler for interpolation_type: "
               << LineSearchInterpolationTypeToString(interpolation_type)
               << ", please contact the developers!";
  }

  double step_size = 0.0, unused_min_value = 0.0;
  MinimizeInterpolatingPolynomial(
      samples, min_step_size, max_step_size, &step_size, &unused_min_value);
  return step_size;
}

ArmijoLineSearch::ArmijoLineSearch(const LineSearch::Options& options)
    : LineSearch(options) {}

void ArmijoLineSearch::DoSearch(const double step_size_estimate,
                                const double initial_cost,
                                const double initial_gradient,
                                Summary* summary) const {
  CHECK_GE(step_size_estimate, 0.0);
  CHECK_GT(options().sufficient_decrease, 0.0);
  CHECK_LT(options().sufficient_decrease, 1.0);
  CHECK_GT(options().max_num_iterations, 0);
  LineSearchFunction* function = options().function;

  // Note initial_cost & initial_gradient are evaluated at step_size = 0,
  // not step_size_estimate, which is our starting guess.
  FunctionSample initial_position(0.0, initial_cost, initial_gradient);
  initial_position.vector_x = function->position();
  initial_position.vector_x_is_valid = true;

  const double descent_direction_max_norm = function->DirectionInfinityNorm();
  FunctionSample previous;
  FunctionSample current;

  // As the Armijo line search algorithm always uses the initial point, for
  // which both the function value and derivative are known, when fitting a
  // minimizing polynomial, we can fit up to a quadratic without requiring the
  // gradient at the current query point.
  const bool kEvaluateGradient = options().interpolation_type == CUBIC;

  ++summary->num_function_evaluations;
  if (kEvaluateGradient) {
    ++summary->num_gradient_evaluations;
  }

  function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
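  // The loop below enforces the Armijo sufficient decrease condition on the
  // line search objective phi(a), i.e. the cost at step size a along the
  // search direction:
  //
  //   phi(a) <= phi(0) + c1 * a * phi'(0),
  //
  // where c1 = options().sufficient_decrease, phi(0) = initial_cost and
  // phi'(0) = initial_gradient. Candidate steps that violate the condition
  // (or are invalid) are contracted via interpolation and re-evaluated.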
  while (!current.value_is_valid ||
         current.value > (initial_cost + options().sufficient_decrease *
                                             initial_gradient * current.x)) {
    // If current.value_is_valid is false, we treat it as if the cost at that
    // point is not small enough to satisfy the sufficient decrease condition.
    ++summary->num_iterations;
    if (summary->num_iterations >= options().max_num_iterations) {
      summary->error = StringPrintf(
          "Line search failed: Armijo failed to find a point "
          "satisfying the sufficient decrease condition within "
          "specified max_num_iterations: %d.",
          options().max_num_iterations);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return;
    }

    const double polynomial_minimization_start_time = WallTimeInSeconds();
    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
        options().interpolation_type,
        initial_position,
        previous,
        current,
        (options().max_step_contraction * current.x),
        (options().min_step_contraction * current.x));
    summary->polynomial_minimization_time_in_seconds +=
        (WallTimeInSeconds() - polynomial_minimization_start_time);

    if (step_size * descent_direction_max_norm < options().min_step_size) {
      summary->error = StringPrintf(
          "Line search failed: step_size too small: %.5e "
          "with descent_direction_max_norm: %.5e.",
          step_size,
          descent_direction_max_norm);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return;
    }

    previous = current;

    ++summary->num_function_evaluations;
    if (kEvaluateGradient) {
      ++summary->num_gradient_evaluations;
    }

    function->Evaluate(step_size, kEvaluateGradient, &current);
  }

  summary->optimal_point = current;
  summary->success = true;
}

WolfeLineSearch::WolfeLineSearch(const LineSearch::Options& options)
    : LineSearch(options) {}

void WolfeLineSearch::DoSearch(const double step_size_estimate,
                               const double initial_cost,
                               const double initial_gradient,
                               Summary* summary) const {
  // All parameters should have been validated by the Solver, but as
  // invalid values would produce crazy nonsense, hard check them here.
  CHECK_GE(step_size_estimate, 0.0);
  CHECK_GT(options().sufficient_decrease, 0.0);
  CHECK_GT(options().sufficient_curvature_decrease,
           options().sufficient_decrease);
  CHECK_LT(options().sufficient_curvature_decrease, 1.0);
  CHECK_GT(options().max_step_expansion, 1.0);

  // Note initial_cost & initial_gradient are evaluated at step_size = 0,
  // not step_size_estimate, which is our starting guess.
  FunctionSample initial_position(0.0, initial_cost, initial_gradient);
  initial_position.vector_x = options().function->position();
  initial_position.vector_x_is_valid = true;
  bool do_zoom_search = false;
  // Important: The high/low in bracket_high & bracket_low refer to their
  // _function_ values, not their step sizes i.e. it is _not_ required that
  // bracket_low.x < bracket_high.x.
  FunctionSample solution, bracket_low, bracket_high;

  // Wolfe bracketing phase: Increases step_size until either it finds a point
  // that satisfies the (strong) Wolfe conditions, or an interval that brackets
  // step sizes which satisfy the conditions. From Nocedal & Wright [1] p61 the
  // interval: (step_size_{k-1}, step_size_{k}) contains step lengths satisfying
  // the strong Wolfe conditions if one of the following conditions is met:
  //
  //   1. step_size_{k} violates the sufficient decrease (Armijo) condition.
  //   2. f(step_size_{k}) >= f(step_size_{k-1}).
  //   3. f'(step_size_{k}) >= 0.
  //
  // Caveat: If f(step_size_{k}) is invalid, then step_size is reduced;
  // ignoring this special case, step_size monotonically increases during
  // bracketing.
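  //
  // For reference, writing phi(a) for the line search objective at step size
  // a, with c1 = options().sufficient_decrease and
  // c2 = options().sufficient_curvature_decrease, the strong Wolfe conditions
  // used throughout are:
  //
  //   phi(a)    <= phi(0) + c1 * a * phi'(0)   (sufficient decrease), and
  //   |phi'(a)| <= c2 * |phi'(0)|              (strong curvature).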
  if (!this->BracketingPhase(initial_position,
                             step_size_estimate,
                             &bracket_low,
                             &bracket_high,
                             &do_zoom_search,
                             summary)) {
    // Failed to find either a valid point, a valid bracket satisfying the Wolfe
    // conditions, or even a step size > minimum tolerance satisfying the Armijo
    // condition.
    return;
  }

  if (!do_zoom_search) {
    // Either: Bracketing phase already found a point satisfying the strong
    // Wolfe conditions, thus no Zoom required.
    //
    // Or: Bracketing failed to find a valid bracket or a point satisfying the
    // strong Wolfe conditions within max_num_iterations, or whilst searching
    // shrank the bracket width until it was below our minimum tolerance.
    // As these are 'artificial' constraints, and we would otherwise fail to
    // produce a valid point when ArmijoLineSearch would succeed, we return the
    // point with the lowest cost found thus far which satisfies the Armijo
    // condition (but not the Wolfe conditions).
    summary->optimal_point = bracket_low;
    summary->success = true;
    return;
  }

  VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
          << "Starting line search zoom phase with bracket_low: " << bracket_low
          << ", bracket_high: " << bracket_high
          << ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
          << ", bracket abs delta cost: "
          << fabs(bracket_low.value - bracket_high.value);

  // Wolfe Zoom phase: Called when the Bracketing phase finds an interval of
  // non-zero, finite width that should bracket step sizes which satisfy the
  // (strong) Wolfe conditions (before finding a step size that satisfies the
  // conditions). Zoom successively decreases the size of the interval until a
  // step size which satisfies the Wolfe conditions is found. The interval is
  // defined by bracket_low & bracket_high, which satisfy:
  //
  //   1. The interval bounded by step sizes: bracket_low.x & bracket_high.x
  //      contains step sizes that satisfy the strong Wolfe conditions.
  //   2. Of all the step sizes evaluated *which satisfied the Armijo
  //      sufficient decrease condition*, bracket_low.x is the one which
  //      generated the smallest function value, i.e. bracket_low.value <
  //      f(all other steps satisfying Armijo).
  //      - Note that this does _not_ (necessarily) mean that initially
  //        bracket_low.value < bracket_high.value (although this is typical),
  //        e.g. when bracket_low = initial_position and bracket_high is the
  //        first sample, which does not satisfy the Armijo condition but
  //        still has bracket_high.value < initial_position.value.
  //   3. bracket_high is chosen after bracket_low, s.t.
  //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
  if (!this->ZoomPhase(
          initial_position, bracket_low, bracket_high, &solution, summary) &&
      !solution.value_is_valid) {
    // Failed to find a valid point (given the specified decrease parameters)
    // within the specified bracket.
    return;
  }
  // Ensure that if we ran out of iterations whilst zooming the bracket, or
  // shrank the bracket width to < tolerance and failed to find a point which
  // satisfies the strong Wolfe curvature condition, we return the point
  // amongst those found thus far which minimizes f() and satisfies the Armijo
  // condition.

  if (!solution.value_is_valid || solution.value > bracket_low.value) {
    summary->optimal_point = bracket_low;
  } else {
    summary->optimal_point = solution;
  }

  summary->success = true;
}

// Returns true if either:
//
// A termination condition satisfying the (strong) Wolfe bracketing conditions
// is found:
//
// - A valid point, defined as a bracket of zero width [zoom not required].
// - A valid bracket (of width > tolerance), [zoom required].
//
// Or, searching was stopped due to an 'artificial' constraint, i.e. not
// a condition imposed / required by the underlying algorithm, but instead an
// engineering / implementation consideration. But a step which exceeds the
// minimum step size, and satisfies the Armijo condition was still found,
// and should thus be used [zoom not required].
//
// Returns false if no step size > minimum step size was found which
// satisfies at least the Armijo condition.
bool WolfeLineSearch::BracketingPhase(const FunctionSample& initial_position,
                                      const double step_size_estimate,
                                      FunctionSample* bracket_low,
                                      FunctionSample* bracket_high,
                                      bool* do_zoom_search,
                                      Summary* summary) const {
  LineSearchFunction* function = options().function;

  FunctionSample previous = initial_position;
  FunctionSample current;

  const double descent_direction_max_norm = function->DirectionInfinityNorm();

  *do_zoom_search = false;
  *bracket_low = initial_position;

  // As we require the gradient to evaluate the Wolfe condition, we always
  // calculate it together with the value, irrespective of the interpolation
  // type. This is in contrast to only calculating the gradient after the
  // Armijo condition is satisfied, as the computational saving from that
  // approach would be slight (perhaps even negative due to the extra call).
  // Also, always calculating the value & gradient together protects against us
  // reporting invalid solutions if the cost function returns slightly different
  // function values when evaluated with / without gradients (due to numerical
  // issues).
  ++summary->num_function_evaluations;
  ++summary->num_gradient_evaluations;
  const bool kEvaluateGradient = true;
  function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
  while (true) {
    ++summary->num_iterations;

    if (current.value_is_valid &&
        (current.value > (initial_position.value +
                          options().sufficient_decrease *
                              initial_position.gradient * current.x) ||
         (previous.value_is_valid && current.value > previous.value))) {
      // Bracket found: current step size violates Armijo sufficient decrease
      // condition, or has stepped past an inflection point of f() relative to
      // previous step size.
      *do_zoom_search = true;
      *bracket_low = previous;
      *bracket_high = current;
      VLOG(3) << std::scientific
              << std::setprecision(kErrorMessageNumericPrecision)
              << "Bracket found: current step (" << current.x
              << ") violates Armijo sufficient condition, or has passed an "
              << "inflection point of f() based on value.";
      break;
    }

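    // Curvature check below: for a descent direction initial_position.gradient
    // is negative, so -options().sufficient_curvature_decrease *
    // initial_position.gradient equals c2 * |phi'(0)|, i.e. the test is the
    // strong Wolfe curvature condition |phi'(step)| <= c2 * |phi'(0)|.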
    if (current.value_is_valid &&
        fabs(current.gradient) <= -options().sufficient_curvature_decrease *
                                      initial_position.gradient) {
      // Current step size satisfies the strong Wolfe conditions, and is thus a
      // valid termination point, therefore a Zoom not required.
      *bracket_low = current;
      *bracket_high = current;
      VLOG(3) << std::scientific
              << std::setprecision(kErrorMessageNumericPrecision)
              << "Bracketing phase found step size: " << current.x
              << ", satisfying strong Wolfe conditions, initial_position: "
              << initial_position << ", current: " << current;
      break;

    } else if (current.value_is_valid && current.gradient >= 0) {
      // Bracket found: current step size has stepped past an inflection point
      // of f(), but Armijo sufficient decrease is still satisfied and
      // f(current) is our best minimum thus far. Remember step size
      // monotonically increases, thus previous_step_size < current_step_size
      // even though f(previous) > f(current).
      *do_zoom_search = true;
      // Note inverse ordering from first bracket case.
      *bracket_low = current;
      *bracket_high = previous;
      VLOG(3) << "Bracket found: current step (" << current.x
              << ") satisfies Armijo, but has gradient >= 0, thus have passed "
              << "an inflection point of f().";
      break;

    } else if (current.value_is_valid &&
               fabs(current.x - previous.x) * descent_direction_max_norm <
                   options().min_step_size) {
      // We have shrunk the search bracket to a width less than our tolerance,
      // and still not found either a point satisfying the strong Wolfe
      // conditions, or a valid bracket containing such a point. Stop searching
      // and set bracket_low to the step size amongst all those tested which
      // minimizes f() and satisfies the Armijo condition.

      if (!options().is_silent) {
        LOG(WARNING) << "Line search failed: Wolfe bracketing phase shrank "
                     << "bracket width: " << fabs(current.x - previous.x)
                     << ", to < tolerance: " << options().min_step_size
                     << ", with descent_direction_max_norm: "
                     << descent_direction_max_norm << ", and failed to find "
                     << "a point satisfying the strong Wolfe conditions or a "
                     << "bracket containing such a point. Accepting "
                     << "point found satisfying Armijo condition only, to "
                     << "allow continuation.";
      }
      *bracket_low = current;
      break;

    } else if (summary->num_iterations >= options().max_num_iterations) {
      // Check num iterations bound here so that we always evaluate the
      // max_num_iterations-th iteration against all conditions, and
      // then perform no additional (unused) evaluations.
      summary->error = StringPrintf(
          "Line search failed: Wolfe bracketing phase failed to "
          "find a point satisfying strong Wolfe conditions, or a "
          "bracket containing such a point within specified "
          "max_num_iterations: %d",
          options().max_num_iterations);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      // Ensure that bracket_low is always set to the step size amongst all
      // those tested which minimizes f() and satisfies the Armijo condition
      // when we terminate due to the 'artificial' max_num_iterations condition.
      *bracket_low =
          current.value_is_valid && current.value < bracket_low->value
              ? current
              : *bracket_low;
      break;
    }
    // Either: f(current) is invalid; or, f(current) is valid, but does not
    // satisfy the strong Wolfe conditions itself, or the conditions for
    // being a boundary of a bracket.

    // If f(current) is valid (but meets no criteria), expand the search by
    // increasing the step size. If f(current) is invalid, contract the step
    // size.
    //
    // In Nocedal & Wright [1] (p60), the step-size can only increase in the
    // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k *
    // factor]. However, this does not account for the function returning
    // invalid values, which we support; in that case we need to contract the
    // step size whilst ensuring that we do not invert the bracket, i.e. we
    // require that: step_size_{k-1} <= step_size_{k+1} < step_size_k.
    const double min_step_size =
        current.value_is_valid ? current.x : previous.x;
    const double max_step_size =
        current.value_is_valid ? (current.x * options().max_step_expansion)
                               : current.x;

    // We are performing 2-point interpolation only here, but the API of
    // InterpolatingPolynomialMinimizingStepSize() allows for up to
    // 3-point interpolation, so pad call with a sample with an invalid
    // value that will therefore be ignored.
    const FunctionSample unused_previous;
    DCHECK(!unused_previous.value_is_valid);
    // Contracts step size if f(current) is not valid.
    const double polynomial_minimization_start_time = WallTimeInSeconds();
    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
        options().interpolation_type,
        previous,
        unused_previous,
        current,
        min_step_size,
        max_step_size);
    summary->polynomial_minimization_time_in_seconds +=
        (WallTimeInSeconds() - polynomial_minimization_start_time);
    if (step_size * descent_direction_max_norm < options().min_step_size) {
      summary->error = StringPrintf(
          "Line search failed: step_size too small: %.5e "
          "with descent_direction_max_norm: %.5e",
          step_size,
          descent_direction_max_norm);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }

    // Only advance the lower boundary (in x) of the bracket if f(current)
    // is valid such that we can support contracting the step size when
    // f(current) is invalid without risking inverting the bracket in x, i.e.
    // prevent previous.x > current.x.
    previous = current.value_is_valid ? current : previous;
    ++summary->num_function_evaluations;
    ++summary->num_gradient_evaluations;
    function->Evaluate(step_size, kEvaluateGradient, &current);
  }

  // Ensure that even if a valid bracket was found, we will only mark a zoom
  // as required if the bracket's width is greater than our minimum tolerance.
  if (*do_zoom_search &&
      fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm <
          options().min_step_size) {
    *do_zoom_search = false;
  }

  return true;
}

// Returns true iff solution satisfies the strong Wolfe conditions. Otherwise,
// on returning false, if we stopped searching due to the 'artificial' condition
// of reaching max_num_iterations, solution is the step size amongst all those
// tested which satisfied the Armijo decrease condition and minimized f().
bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
                                FunctionSample bracket_low,
                                FunctionSample bracket_high,
                                FunctionSample* solution,
                                Summary* summary) const {
  LineSearchFunction* function = options().function;

  CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid)
      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
      << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
      << "the developers!, initial_position: " << initial_position
      << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high;
  // We do not require bracket_high.gradient_is_valid as the gradient condition
  // for a valid bracket is only dependent upon bracket_low.gradient, and
  // in order to minimize jacobian evaluations, bracket_high.gradient may
  // not have been calculated (if bracket_high.value does not satisfy the
  // Armijo sufficient decrease condition and the interpolation method does
  // not require it).
  //
  // We also do not require that: bracket_low.value < bracket_high.value,
  // although this is typical. This is to deal with the case when
  // bracket_low = initial_position, bracket_high is the first sample,
  // and bracket_high does not satisfy the Armijo condition, but still has
  // bracket_high.value < initial_position.value.
  CHECK(bracket_high.value_is_valid)
      << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
      << "Ceres bug: f_high input to Wolfe Zoom invalid, please "
      << "contact the developers!, initial_position: " << initial_position
      << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high;

  if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
    // The third condition for a valid initial bracket:
    //
    //   3. bracket_high is chosen after bracket_low, s.t.
    //      bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
    //
    // is not satisfied. As this can happen when the users' cost function
    // returns inconsistent gradient values relative to the function values,
    // we do not CHECK_LT(), but we do stop processing and return an invalid
    // value.
    summary->error = StringPrintf(
        "Line search failed: Wolfe zoom phase passed a bracket "
        "which does not satisfy: bracket_low.gradient * "
        "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
        "with initial_position: %s, bracket_low: %s, bracket_high:"
        " %s, the most likely cause of which is the cost function "
        "returning inconsistent gradient & function values.",
        bracket_low.gradient * (bracket_high.x - bracket_low.x),
        initial_position.ToDebugString().c_str(),
        bracket_low.ToDebugString().c_str(),
        bracket_high.ToDebugString().c_str());
    if (!options().is_silent) {
      LOG(WARNING) << summary->error;
    }
    solution->value_is_valid = false;
    return false;
  }

  const int num_bracketing_iterations = summary->num_iterations;
  const double descent_direction_max_norm = function->DirectionInfinityNorm();

  while (true) {
    // Set solution to bracket_low, as it is our best step size (smallest f())
    // found thus far and satisfies the Armijo condition, even though it does
    // not satisfy the Wolfe condition.
    *solution = bracket_low;
    if (summary->num_iterations >= options().max_num_iterations) {
      summary->error = StringPrintf(
          "Line search failed: Wolfe zoom phase failed to "
          "find a point satisfying strong Wolfe conditions "
          "within specified max_num_iterations: %d, "
          "(num iterations taken for bracketing: %d).",
          options().max_num_iterations,
          num_bracketing_iterations);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }
    if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm <
        options().min_step_size) {
      // Bracket width has been reduced below tolerance, and no point satisfying
      // the strong Wolfe conditions has been found.
      summary->error = StringPrintf(
          "Line search failed: Wolfe zoom bracket width: %.5e "
          "too small with descent_direction_max_norm: %.5e.",
          fabs(bracket_high.x - bracket_low.x),
          descent_direction_max_norm);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }

    ++summary->num_iterations;
    // Polynomial interpolation requires inputs ordered according to step size,
    // not f(step size).
    const FunctionSample& lower_bound_step =
        bracket_low.x < bracket_high.x ? bracket_low : bracket_high;
    const FunctionSample& upper_bound_step =
        bracket_low.x < bracket_high.x ? bracket_high : bracket_low;
    // We are performing 2-point interpolation only here, but the API of
    // InterpolatingPolynomialMinimizingStepSize() allows for up to
    // 3-point interpolation, so pad call with a sample with an invalid
    // value that will therefore be ignored.
    const FunctionSample unused_previous;
    DCHECK(!unused_previous.value_is_valid);
    const double polynomial_minimization_start_time = WallTimeInSeconds();
    const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
        options().interpolation_type,
        lower_bound_step,
        unused_previous,
        upper_bound_step,
        lower_bound_step.x,
        upper_bound_step.x);
    summary->polynomial_minimization_time_in_seconds +=
        (WallTimeInSeconds() - polynomial_minimization_start_time);
    // No check on magnitude of step size being too small here as it is
    // lower-bounded by the initial bracket start point, which was valid.
    //
    // As we require the gradient to evaluate the Wolfe condition, we always
    // calculate it together with the value, irrespective of the interpolation
    // type. This is in contrast to only calculating the gradient after the
    // Armijo condition is satisfied, as the computational saving from that
    // approach would be slight (perhaps even negative due to the extra call).
    // Also, always calculating the value & gradient together protects against
    // us reporting invalid solutions if the cost function returns slightly
    // different function values when evaluated with / without gradients (due
    // to numerical issues).
    ++summary->num_function_evaluations;
    ++summary->num_gradient_evaluations;
    const bool kEvaluateGradient = true;
    function->Evaluate(step_size, kEvaluateGradient, solution);
    if (!solution->value_is_valid || !solution->gradient_is_valid) {
      summary->error = StringPrintf(
          "Line search failed: Wolfe Zoom phase found "
          "step_size: %.5e, for which function is invalid, "
          "between low_step: %.5e and high_step: %.5e "
          "at which function is valid.",
          solution->x,
          bracket_low.x,
          bracket_high.x);
      if (!options().is_silent) {
        LOG(WARNING) << summary->error;
      }
      return false;
    }

    VLOG(3) << "Zoom iteration: "
            << summary->num_iterations - num_bracketing_iterations
            << ", bracket_low: " << bracket_low
            << ", bracket_high: " << bracket_high
            << ", minimizing solution: " << *solution;

    if ((solution->value > (initial_position.value +
                            options().sufficient_decrease *
                                initial_position.gradient * solution->x)) ||
        (solution->value >= bracket_low.value)) {
      // Armijo sufficient decrease not satisfied, or not better
      // than current lowest sample, use as new upper bound.
      bracket_high = *solution;
      continue;
    }

    // Armijo sufficient decrease satisfied, check strong Wolfe condition.
    if (fabs(solution->gradient) <=
        -options().sufficient_curvature_decrease * initial_position.gradient) {
      // Found a valid termination point satisfying strong Wolfe conditions.
      VLOG(3) << std::scientific
              << std::setprecision(kErrorMessageNumericPrecision)
              << "Zoom phase found step size: " << solution->x
              << ", satisfying strong Wolfe conditions.";
      break;

    } else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) {
      bracket_high = bracket_low;
    }

    bracket_low = *solution;
  }
  // Solution contains a valid point which satisfies the strong Wolfe
  // conditions.
  return true;
}

}  // namespace ceres::internal