blob: ba28bec1680fdb03354e32ab911bed933dabf591 [file] [log] [blame]
Austin Schuh70cc9552019-01-21 19:46:48 -08001// Ceres Solver - A fast non-linear least squares minimizer
Austin Schuh3de38b02024-06-25 18:25:10 -07002// Copyright 2023 Google Inc. All rights reserved.
Austin Schuh70cc9552019-01-21 19:46:48 -08003// http://ceres-solver.org/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11// this list of conditions and the following disclaimer in the documentation
12// and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14// used to endorse or promote products derived from this software without
15// specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: sameeragarwal@google.com (Sameer Agarwal)
30// mierle@gmail.com (Keir Mierle)
31// tbennun@gmail.com (Tal Ben-Nun)
32//
33// Finite differencing routines used by NumericDiffCostFunction.
34
35#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
36#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
37
#include <algorithm>
#include <cmath>
#include <cstring>
#include <limits>
#include <utility>

#include "Eigen/Dense"
#include "Eigen/StdVector"
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/types.h"
#include "glog/logging.h"
49
Austin Schuh3de38b02024-06-25 18:25:10 -070050namespace ceres::internal {
Austin Schuh70cc9552019-01-21 19:46:48 -080051
52// This is split from the main class because C++ doesn't allow partial template
53// specializations for member functions. The alternative is to repeat the main
54// class for differing numbers of parameters, which is also unfortunate.
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080055template <typename CostFunctor,
56 NumericDiffMethodType kMethod,
57 int kNumResiduals,
58 typename ParameterDims,
59 int kParameterBlock,
Austin Schuh70cc9552019-01-21 19:46:48 -080060 int kParameterBlockSize>
61struct NumericDiff {
62 // Mutates parameters but must restore them before return.
63 static bool EvaluateJacobianForParameterBlock(
64 const CostFunctor* functor,
65 const double* residuals_at_eval_point,
66 const NumericDiffOptions& options,
67 int num_residuals,
68 int parameter_block_index,
69 int parameter_block_size,
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080070 double** parameters,
71 double* jacobian) {
72 using Eigen::ColMajor;
Austin Schuh70cc9552019-01-21 19:46:48 -080073 using Eigen::Map;
74 using Eigen::Matrix;
75 using Eigen::RowMajor;
Austin Schuh70cc9552019-01-21 19:46:48 -080076
77 DCHECK(jacobian);
78
79 const int num_residuals_internal =
80 (kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
81 const int parameter_block_index_internal =
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080082 (kParameterBlock != ceres::DYNAMIC ? kParameterBlock
83 : parameter_block_index);
Austin Schuh70cc9552019-01-21 19:46:48 -080084 const int parameter_block_size_internal =
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080085 (kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize
86 : parameter_block_size);
Austin Schuh70cc9552019-01-21 19:46:48 -080087
Austin Schuh3de38b02024-06-25 18:25:10 -070088 using ResidualVector = Matrix<double, kNumResiduals, 1>;
89 using ParameterVector = Matrix<double, kParameterBlockSize, 1>;
Austin Schuh70cc9552019-01-21 19:46:48 -080090
91 // The convoluted reasoning for choosing the Row/Column major
92 // ordering of the matrix is an artifact of the restrictions in
93 // Eigen that prevent it from creating RowMajor matrices with a
94 // single column. In these cases, we ask for a ColMajor matrix.
Austin Schuh3de38b02024-06-25 18:25:10 -070095 using JacobianMatrix =
96 Matrix<double,
97 kNumResiduals,
98 kParameterBlockSize,
99 (kParameterBlockSize == 1) ? ColMajor : RowMajor>;
Austin Schuh70cc9552019-01-21 19:46:48 -0800100
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800101 Map<JacobianMatrix> parameter_jacobian(
102 jacobian, num_residuals_internal, parameter_block_size_internal);
Austin Schuh70cc9552019-01-21 19:46:48 -0800103
104 Map<ParameterVector> x_plus_delta(
105 parameters[parameter_block_index_internal],
106 parameter_block_size_internal);
107 ParameterVector x(x_plus_delta);
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800108 ParameterVector step_size =
109 x.array().abs() * ((kMethod == RIDDERS)
110 ? options.ridders_relative_initial_step_size
111 : options.relative_step_size);
Austin Schuh70cc9552019-01-21 19:46:48 -0800112
113 // It is not a good idea to make the step size arbitrarily
114 // small. This will lead to problems with round off and numerical
115 // instability when dividing by the step size. The general
116 // recommendation is to not go down below sqrt(epsilon).
117 double min_step_size = std::sqrt(std::numeric_limits<double>::epsilon());
118
119 // For Ridders' method, the initial step size is required to be large,
120 // thus ridders_relative_initial_step_size is used.
121 if (kMethod == RIDDERS) {
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800122 min_step_size =
Austin Schuh3de38b02024-06-25 18:25:10 -0700123 (std::max)(min_step_size, options.ridders_relative_initial_step_size);
Austin Schuh70cc9552019-01-21 19:46:48 -0800124 }
125
126 // For each parameter in the parameter block, use finite differences to
127 // compute the derivative for that parameter.
128 FixedArray<double> temp_residual_array(num_residuals_internal);
129 FixedArray<double> residual_array(num_residuals_internal);
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800130 Map<ResidualVector> residuals(residual_array.data(),
Austin Schuh70cc9552019-01-21 19:46:48 -0800131 num_residuals_internal);
132
133 for (int j = 0; j < parameter_block_size_internal; ++j) {
Austin Schuh3de38b02024-06-25 18:25:10 -0700134 const double delta = (std::max)(min_step_size, step_size(j));
Austin Schuh70cc9552019-01-21 19:46:48 -0800135
136 if (kMethod == RIDDERS) {
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800137 if (!EvaluateRiddersJacobianColumn(functor,
138 j,
139 delta,
Austin Schuh70cc9552019-01-21 19:46:48 -0800140 options,
141 num_residuals_internal,
142 parameter_block_size_internal,
143 x.data(),
144 residuals_at_eval_point,
145 parameters,
146 x_plus_delta.data(),
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800147 temp_residual_array.data(),
148 residual_array.data())) {
Austin Schuh70cc9552019-01-21 19:46:48 -0800149 return false;
150 }
151 } else {
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800152 if (!EvaluateJacobianColumn(functor,
153 j,
154 delta,
Austin Schuh70cc9552019-01-21 19:46:48 -0800155 num_residuals_internal,
156 parameter_block_size_internal,
157 x.data(),
158 residuals_at_eval_point,
159 parameters,
160 x_plus_delta.data(),
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800161 temp_residual_array.data(),
162 residual_array.data())) {
Austin Schuh70cc9552019-01-21 19:46:48 -0800163 return false;
164 }
165 }
166
167 parameter_jacobian.col(j).matrix() = residuals;
168 }
169 return true;
170 }
171
172 static bool EvaluateJacobianColumn(const CostFunctor* functor,
173 int parameter_index,
174 double delta,
175 int num_residuals,
176 int parameter_block_size,
177 const double* x_ptr,
178 const double* residuals_at_eval_point,
179 double** parameters,
180 double* x_plus_delta_ptr,
181 double* temp_residuals_ptr,
182 double* residuals_ptr) {
183 using Eigen::Map;
184 using Eigen::Matrix;
185
Austin Schuh3de38b02024-06-25 18:25:10 -0700186 using ResidualVector = Matrix<double, kNumResiduals, 1>;
187 using ParameterVector = Matrix<double, kParameterBlockSize, 1>;
Austin Schuh70cc9552019-01-21 19:46:48 -0800188
189 Map<const ParameterVector> x(x_ptr, parameter_block_size);
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800190 Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
Austin Schuh70cc9552019-01-21 19:46:48 -0800191
192 Map<ResidualVector> residuals(residuals_ptr, num_residuals);
193 Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
194
195 // Mutate 1 element at a time and then restore.
196 x_plus_delta(parameter_index) = x(parameter_index) + delta;
197
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800198 if (!VariadicEvaluate<ParameterDims>(
199 *functor, parameters, residuals.data())) {
Austin Schuh70cc9552019-01-21 19:46:48 -0800200 return false;
201 }
202
203 // Compute this column of the jacobian in 3 steps:
204 // 1. Store residuals for the forward part.
205 // 2. Subtract residuals for the backward (or 0) part.
206 // 3. Divide out the run.
207 double one_over_delta = 1.0 / delta;
208 if (kMethod == CENTRAL || kMethod == RIDDERS) {
209 // Compute the function on the other side of x(parameter_index).
210 x_plus_delta(parameter_index) = x(parameter_index) - delta;
211
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800212 if (!VariadicEvaluate<ParameterDims>(
213 *functor, parameters, temp_residuals.data())) {
Austin Schuh70cc9552019-01-21 19:46:48 -0800214 return false;
215 }
216
217 residuals -= temp_residuals;
218 one_over_delta /= 2;
219 } else {
220 // Forward difference only; reuse existing residuals evaluation.
221 residuals -=
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800222 Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
Austin Schuh70cc9552019-01-21 19:46:48 -0800223 }
224
225 // Restore x_plus_delta.
226 x_plus_delta(parameter_index) = x(parameter_index);
227
228 // Divide out the run to get slope.
229 residuals *= one_over_delta;
230
231 return true;
232 }
233
234 // This numeric difference implementation uses adaptive differentiation
235 // on the parameters to obtain the Jacobian matrix. The adaptive algorithm
236 // is based on Ridders' method for adaptive differentiation, which creates
237 // a Romberg tableau from varying step sizes and extrapolates the
238 // intermediate results to obtain the current computational error.
239 //
240 // References:
241 // C.J.F. Ridders, Accurate computation of F'(x) and F'(x) F"(x), Advances
242 // in Engineering Software (1978), Volume 4, Issue 2, April 1982,
243 // Pages 75-76, ISSN 0141-1195,
244 // http://dx.doi.org/10.1016/S0141-1195(82)80057-0.
245 static bool EvaluateRiddersJacobianColumn(
246 const CostFunctor* functor,
247 int parameter_index,
248 double delta,
249 const NumericDiffOptions& options,
250 int num_residuals,
251 int parameter_block_size,
252 const double* x_ptr,
253 const double* residuals_at_eval_point,
254 double** parameters,
255 double* x_plus_delta_ptr,
256 double* temp_residuals_ptr,
257 double* residuals_ptr) {
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800258 using Eigen::aligned_allocator;
Austin Schuh70cc9552019-01-21 19:46:48 -0800259 using Eigen::Map;
260 using Eigen::Matrix;
Austin Schuh70cc9552019-01-21 19:46:48 -0800261
Austin Schuh3de38b02024-06-25 18:25:10 -0700262 using ResidualVector = Matrix<double, kNumResiduals, 1>;
263 using ResidualCandidateMatrix =
264 Matrix<double, kNumResiduals, Eigen::Dynamic>;
265 using ParameterVector = Matrix<double, kParameterBlockSize, 1>;
Austin Schuh70cc9552019-01-21 19:46:48 -0800266
267 Map<const ParameterVector> x(x_ptr, parameter_block_size);
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800268 Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
Austin Schuh70cc9552019-01-21 19:46:48 -0800269
270 Map<ResidualVector> residuals(residuals_ptr, num_residuals);
271 Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
272
273 // In order for the algorithm to converge, the step size should be
274 // initialized to a value that is large enough to produce a significant
275 // change in the function.
276 // As the derivative is estimated, the step size decreases.
277 // By default, the step sizes are chosen so that the middle column
278 // of the Romberg tableau uses the input delta.
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800279 double current_step_size =
280 delta * pow(options.ridders_step_shrink_factor,
281 options.max_num_ridders_extrapolations / 2);
Austin Schuh70cc9552019-01-21 19:46:48 -0800282
283 // Double-buffering temporary differential candidate vectors
284 // from previous step size.
285 ResidualCandidateMatrix stepsize_candidates_a(
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800286 num_residuals, options.max_num_ridders_extrapolations);
Austin Schuh70cc9552019-01-21 19:46:48 -0800287 ResidualCandidateMatrix stepsize_candidates_b(
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800288 num_residuals, options.max_num_ridders_extrapolations);
Austin Schuh70cc9552019-01-21 19:46:48 -0800289 ResidualCandidateMatrix* current_candidates = &stepsize_candidates_a;
290 ResidualCandidateMatrix* previous_candidates = &stepsize_candidates_b;
291
292 // Represents the computational error of the derivative. This variable is
293 // initially set to a large value, and is set to the difference between
294 // current and previous finite difference extrapolations.
295 // norm_error is supposed to decrease as the finite difference tableau
296 // generation progresses, serving both as an estimate for differentiation
297 // error and as a measure of differentiation numerical stability.
Austin Schuh3de38b02024-06-25 18:25:10 -0700298 double norm_error = (std::numeric_limits<double>::max)();
Austin Schuh70cc9552019-01-21 19:46:48 -0800299
300 // Loop over decreasing step sizes until:
301 // 1. Error is smaller than a given value (ridders_epsilon),
302 // 2. Maximal order of extrapolation reached, or
303 // 3. Extrapolation becomes numerically unstable.
304 for (int i = 0; i < options.max_num_ridders_extrapolations; ++i) {
305 // Compute the numerical derivative at this step size.
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800306 if (!EvaluateJacobianColumn(functor,
307 parameter_index,
308 current_step_size,
Austin Schuh70cc9552019-01-21 19:46:48 -0800309 num_residuals,
310 parameter_block_size,
311 x.data(),
312 residuals_at_eval_point,
313 parameters,
314 x_plus_delta.data(),
315 temp_residuals.data(),
316 current_candidates->col(0).data())) {
317 // Something went wrong; bail.
318 return false;
319 }
320
321 // Store initial results.
322 if (i == 0) {
323 residuals = current_candidates->col(0);
324 }
325
326 // Shrink differentiation step size.
327 current_step_size /= options.ridders_step_shrink_factor;
328
329 // Extrapolation factor for Richardson acceleration method (see below).
330 double richardson_factor = options.ridders_step_shrink_factor *
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800331 options.ridders_step_shrink_factor;
Austin Schuh70cc9552019-01-21 19:46:48 -0800332 for (int k = 1; k <= i; ++k) {
333 // Extrapolate the various orders of finite differences using
334 // the Richardson acceleration method.
335 current_candidates->col(k) =
336 (richardson_factor * current_candidates->col(k - 1) -
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800337 previous_candidates->col(k - 1)) /
338 (richardson_factor - 1.0);
Austin Schuh70cc9552019-01-21 19:46:48 -0800339
340 richardson_factor *= options.ridders_step_shrink_factor *
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800341 options.ridders_step_shrink_factor;
Austin Schuh70cc9552019-01-21 19:46:48 -0800342
343 // Compute the difference between the previous value and the current.
Austin Schuh3de38b02024-06-25 18:25:10 -0700344 double candidate_error = (std::max)(
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800345 (current_candidates->col(k) - current_candidates->col(k - 1))
346 .norm(),
347 (current_candidates->col(k) - previous_candidates->col(k - 1))
348 .norm());
Austin Schuh70cc9552019-01-21 19:46:48 -0800349
350 // If the error has decreased, update results.
351 if (candidate_error <= norm_error) {
352 norm_error = candidate_error;
353 residuals = current_candidates->col(k);
354
355 // If the error is small enough, stop.
356 if (norm_error < options.ridders_epsilon) {
357 break;
358 }
359 }
360 }
361
362 // After breaking out of the inner loop, declare convergence.
363 if (norm_error < options.ridders_epsilon) {
364 break;
365 }
366
367 // Check to see if the current gradient estimate is numerically unstable.
368 // If so, bail out and return the last stable result.
369 if (i > 0) {
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800370 double tableau_error =
371 (current_candidates->col(i) - previous_candidates->col(i - 1))
372 .norm();
Austin Schuh70cc9552019-01-21 19:46:48 -0800373
374 // Compare current error to the chosen candidate's error.
375 if (tableau_error >= 2 * norm_error) {
376 break;
377 }
378 }
379
380 std::swap(current_candidates, previous_candidates);
381 }
382 return true;
383 }
384};
385
// This function calls NumericDiff<...>::EvaluateJacobianForParameterBlock for
// each parameter block.
//
// Example:
// A call to
// EvaluateJacobianForParameterBlocks<StaticParameterDims<2, 3>>(
//        functor,
//        residuals_at_eval_point,
//        options,
//        num_residuals,
//        parameters,
//        jacobians);
// will result in the following calls to
// NumericDiff<...>::EvaluateJacobianForParameterBlock:
//
// if (jacobians[0] != nullptr) {
//   if (!NumericDiff<
//           CostFunctor,
//           method,
//           kNumResiduals,
//           StaticParameterDims<2, 3>,
//           0,
//           2>::EvaluateJacobianForParameterBlock(functor,
//                                                 residuals_at_eval_point,
//                                                 options,
//                                                 num_residuals,
//                                                 0,
//                                                 2,
//                                                 parameters,
//                                                 jacobians[0])) {
//     return false;
//   }
// }
// if (jacobians[1] != nullptr) {
//   if (!NumericDiff<
//           CostFunctor,
//           method,
//           kNumResiduals,
//           StaticParameterDims<2, 3>,
//           1,
//           3>::EvaluateJacobianForParameterBlock(functor,
//                                                 residuals_at_eval_point,
//                                                 options,
//                                                 num_residuals,
//                                                 1,
//                                                 3,
//                                                 parameters,
//                                                 jacobians[1])) {
//     return false;
//   }
// }
//
// Primary template; never defined. The compile-time recursion below is
// driven by partial specializations on the std::integer_sequence of
// parameter block sizes.
template <typename ParameterDims,
          typename Parameters = typename ParameterDims::Parameters,
          int ParameterIdx = 0>
struct EvaluateJacobianForParameterBlocks;

// Recursive case: peel off the first block size N, differentiate block
// ParameterIdx, then recurse on the remaining sizes Ns... with the index
// advanced by one.
template <typename ParameterDims, int N, int... Ns, int ParameterIdx>
struct EvaluateJacobianForParameterBlocks<ParameterDims,
                                          std::integer_sequence<int, N, Ns...>,
                                          ParameterIdx> {
  template <NumericDiffMethodType method,
            int kNumResiduals,
            typename CostFunctor>
  static bool Apply(const CostFunctor* functor,
                    const double* residuals_at_eval_point,
                    const NumericDiffOptions& options,
                    int num_residuals,
                    double** parameters,
                    double** jacobians) {
    // A nullptr slot means the caller does not want the Jacobian for this
    // parameter block; skip the (expensive) differentiation entirely.
    if (jacobians[ParameterIdx] != nullptr) {
      if (!NumericDiff<
              CostFunctor,
              method,
              kNumResiduals,
              ParameterDims,
              ParameterIdx,
              N>::EvaluateJacobianForParameterBlock(functor,
                                                    residuals_at_eval_point,
                                                    options,
                                                    num_residuals,
                                                    ParameterIdx,
                                                    N,
                                                    parameters,
                                                    jacobians[ParameterIdx])) {
        return false;
      }
    }

    // Recurse (unrolled at compile time) to the next parameter block.
    return EvaluateJacobianForParameterBlocks<ParameterDims,
                                              std::integer_sequence<int, Ns...>,
                                              ParameterIdx + 1>::
        template Apply<method, kNumResiduals>(functor,
                                              residuals_at_eval_point,
                                              options,
                                              num_residuals,
                                              parameters,
                                              jacobians);
  }
};
485
// End of 'recursion'. Nothing more to do: the integer_sequence of remaining
// parameter block sizes is empty, so every block has been processed and the
// overall evaluation succeeded.
template <typename ParameterDims, int ParameterIdx>
struct EvaluateJacobianForParameterBlocks<ParameterDims,
                                          std::integer_sequence<int>,
                                          ParameterIdx> {
  template <NumericDiffMethodType method,
            int kNumResiduals,
            typename CostFunctor>
  static bool Apply(const CostFunctor* /* NOT USED*/,
                    const double* /* NOT USED*/,
                    const NumericDiffOptions& /* NOT USED*/,
                    int /* NOT USED*/,
                    double** /* NOT USED*/,
                    double** /* NOT USED*/) {
    return true;
  }
};
503
Austin Schuh3de38b02024-06-25 18:25:10 -0700504} // namespace ceres::internal
Austin Schuh70cc9552019-01-21 19:46:48 -0800505
506#endif // CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_