// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/loss_function.h"

#include <cstddef>

#include "glog/logging.h"
#include "gtest/gtest.h"

namespace ceres {
namespace internal {
namespace {

// Helper function for testing a LossFunction callback.
//
// Compares the values of rho'(s) and rho''(s) computed by the
// callback with estimates obtained by symmetric finite differencing
// of rho(s).
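//
// Specifically, with step size h = 1e-4, the estimates used below are the
// standard central differences:
//
//   rho'(s)  ~ (rho(s + h) - rho(s - h)) / (2 h)
//   rho''(s) ~ (rho(s + h) - 2 rho(s) + rho(s - h)) / h^2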
void AssertLossFunctionIsValid(const LossFunction& loss, double s) {
  CHECK_GT(s, 0);

  // Evaluate rho(s), rho'(s) and rho''(s).
  double rho[3];
  loss.Evaluate(s, rho);

  // Use symmetric finite differencing to estimate rho'(s) and
  // rho''(s).
  const double kH = 1e-4;
  // Values at s + kH.
  double fwd[3];
  // Values at s - kH.
  double bwd[3];
  loss.Evaluate(s + kH, fwd);
  loss.Evaluate(s - kH, bwd);

  // First derivative.
  const double fd_1 = (fwd[0] - bwd[0]) / (2 * kH);
  ASSERT_NEAR(fd_1, rho[1], 1e-6);

  // Second derivative.
  const double fd_2 = (fwd[0] - 2 * rho[0] + bwd[0]) / (kH * kH);
  ASSERT_NEAR(fd_2, rho[2], 1e-6);
}
}  // namespace

// Try two values of the scaling parameter, a = 0.7 and a = 1.3 (for the
// losses where scaling makes sense), and two values of the squared norm,
// s = 0.357 and s = 1.792.
//
// Note that for the Huber loss these values exercise both code paths
// (i.e. both small and large values of s).
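//
// (HuberLoss(a) switches behavior at s = a^2: below the threshold it is
// simply rho(s) = s, above it the growth is sub-linear in s. Since a^2 is
// 0.49 for a = 0.7 and 1.69 for a = 1.3, the values s = 0.357 and s = 1.792
// fall on opposite sides of the threshold for both scales.)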

TEST(LossFunction, TrivialLoss) {
  AssertLossFunctionIsValid(TrivialLoss(), 0.357);
  AssertLossFunctionIsValid(TrivialLoss(), 1.792);
  // Check that at s = 0: rho = [0, 1, 0].
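  // (TrivialLoss is the identity, rho(s) = s, so rho' = 1 and rho'' = 0
  // everywhere, including at s = 0.)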
  double rho[3];
  TrivialLoss().Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  ASSERT_NEAR(rho[1], 1.0, 1e-6);
  ASSERT_NEAR(rho[2], 0.0, 1e-6);
}

TEST(LossFunction, HuberLoss) {
  AssertLossFunctionIsValid(HuberLoss(0.7), 0.357);
  AssertLossFunctionIsValid(HuberLoss(0.7), 1.792);
  AssertLossFunctionIsValid(HuberLoss(1.3), 0.357);
  AssertLossFunctionIsValid(HuberLoss(1.3), 1.792);
  // Check that at s = 0: rho = [0, 1, 0].
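  // (For s below the a^2 threshold the Huber loss reduces to rho(s) = s,
  // so its value and derivatives at s = 0 match those of TrivialLoss.)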
  double rho[3];
  HuberLoss(0.7).Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  ASSERT_NEAR(rho[1], 1.0, 1e-6);
  ASSERT_NEAR(rho[2], 0.0, 1e-6);
}

TEST(LossFunction, SoftLOneLoss) {
  AssertLossFunctionIsValid(SoftLOneLoss(0.7), 0.357);
  AssertLossFunctionIsValid(SoftLOneLoss(0.7), 1.792);
  AssertLossFunctionIsValid(SoftLOneLoss(1.3), 0.357);
  AssertLossFunctionIsValid(SoftLOneLoss(1.3), 1.792);
  // Check that at s = 0: rho = [0, 1, -1 / (2 * a^2)].
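  // (Consistent with rho(s) = 2 * a^2 * (sqrt(1 + s / a^2) - 1), for which
  // rho''(0) = -1 / (2 * a^2).)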
  double rho[3];
  SoftLOneLoss(0.7).Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  ASSERT_NEAR(rho[1], 1.0, 1e-6);
  ASSERT_NEAR(rho[2], -0.5 / (0.7 * 0.7), 1e-6);
}

TEST(LossFunction, CauchyLoss) {
  AssertLossFunctionIsValid(CauchyLoss(0.7), 0.357);
  AssertLossFunctionIsValid(CauchyLoss(0.7), 1.792);
  AssertLossFunctionIsValid(CauchyLoss(1.3), 0.357);
  AssertLossFunctionIsValid(CauchyLoss(1.3), 1.792);
  // Check that at s = 0: rho = [0, 1, -1 / a^2].
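  // (Consistent with rho(s) = a^2 * log(1 + s / a^2), for which
  // rho''(0) = -1 / a^2.)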
  double rho[3];
  CauchyLoss(0.7).Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  ASSERT_NEAR(rho[1], 1.0, 1e-6);
  ASSERT_NEAR(rho[2], -1.0 / (0.7 * 0.7), 1e-6);
}

TEST(LossFunction, ArctanLoss) {
  AssertLossFunctionIsValid(ArctanLoss(0.7), 0.357);
  AssertLossFunctionIsValid(ArctanLoss(0.7), 1.792);
  AssertLossFunctionIsValid(ArctanLoss(1.3), 0.357);
  AssertLossFunctionIsValid(ArctanLoss(1.3), 1.792);
  // Check that at s = 0: rho = [0, 1, 0].
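  // (Consistent with rho(s) = a * atan(s / a), whose first derivative
  // 1 / (1 + (s / a)^2) equals 1 at s = 0 and whose second derivative
  // vanishes there.)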
  double rho[3];
  ArctanLoss(0.7).Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  ASSERT_NEAR(rho[1], 1.0, 1e-6);
  ASSERT_NEAR(rho[2], 0.0, 1e-6);
}

TEST(LossFunction, TolerantLoss) {
  AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 0.357);
  AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 1.792);
  AssertLossFunctionIsValid(TolerantLoss(0.7, 0.4), 55.5);
  AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 0.357);
  AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 1.792);
  AssertLossFunctionIsValid(TolerantLoss(1.3, 0.1), 55.5);
  // Check that the value at zero is actually zero.
  double rho[3];
  TolerantLoss(0.7, 0.4).Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  // Check that the loss is well behaved just before and just after the
  // approximation threshold; a threshold of 36.7 is used by the
  // implementation.
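  // The values 20.0 + {36.6, 36.7, 36.8} bracket that switch-over point,
  // and 20.0 + 1000.0 probes the tail far beyond it.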
  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.6);
  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.7);
  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 36.8);
  AssertLossFunctionIsValid(TolerantLoss(20.0, 1.0), 20.0 + 1000.0);
}

TEST(LossFunction, TukeyLoss) {
  AssertLossFunctionIsValid(TukeyLoss(0.7), 0.357);
  AssertLossFunctionIsValid(TukeyLoss(0.7), 1.792);
  AssertLossFunctionIsValid(TukeyLoss(1.3), 0.357);
  AssertLossFunctionIsValid(TukeyLoss(1.3), 1.792);
  // Check that at s = 0: rho = [0, 1, -2 / a^2].
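  // (Consistent with rho(s) = (a^2 / 3) * (1 - (1 - s / a^2)^3) for
  // s <= a^2, for which rho'(0) = 1 and rho''(0) = -2 / a^2.)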
  double rho[3];
  TukeyLoss(0.7).Evaluate(0.0, rho);
  ASSERT_NEAR(rho[0], 0.0, 1e-6);
  ASSERT_NEAR(rho[1], 1.0, 1e-6);
  ASSERT_NEAR(rho[2], -2.0 / (0.7 * 0.7), 1e-6);
}

TEST(LossFunction, ComposedLoss) {
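  // Compose the two losses in both orders; the composite must still pass
  // the derivative checks.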
  {
    HuberLoss f(0.7);
    CauchyLoss g(1.3);
    ComposedLoss c(&f, DO_NOT_TAKE_OWNERSHIP, &g, DO_NOT_TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(c, 0.357);
    AssertLossFunctionIsValid(c, 1.792);
  }
  {
    CauchyLoss f(0.7);
    HuberLoss g(1.3);
    ComposedLoss c(&f, DO_NOT_TAKE_OWNERSHIP, &g, DO_NOT_TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(c, 0.357);
    AssertLossFunctionIsValid(c, 1.792);
  }
}

TEST(LossFunction, ScaledLoss) {
  // Wrap a few loss functions with a few scale factors. Construction cannot
  // be combined with the call to AssertLossFunctionIsValid() because Apple's
  // GCC is unable to elide the copy of ScaledLoss, which is not copyable.
  {
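    // The wrapped loss is NULL here; ScaledLoss treats a NULL inner loss as
    // the identity, so this is effectively rho(s) = 6 * s.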
    ScaledLoss scaled_loss(NULL, 6, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 0.323);
  }
  {
    ScaledLoss scaled_loss(new TrivialLoss(), 10, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 0.357);
  }
  {
    ScaledLoss scaled_loss(new HuberLoss(0.7), 0.1, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 1.792);
  }
  {
    ScaledLoss scaled_loss(new SoftLOneLoss(1.3), 0.1, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 1.792);
  }
  {
    ScaledLoss scaled_loss(new CauchyLoss(1.3), 10, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 1.792);
  }
  {
    ScaledLoss scaled_loss(new ArctanLoss(1.3), 10, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 1.792);
  }
  {
    ScaledLoss scaled_loss(new TolerantLoss(1.3, 0.1), 10, TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 1.792);
  }
  {
    ScaledLoss scaled_loss(new ComposedLoss(new HuberLoss(0.8),
                                            TAKE_OWNERSHIP,
                                            new TolerantLoss(1.3, 0.5),
                                            TAKE_OWNERSHIP),
                           10,
                           TAKE_OWNERSHIP);
    AssertLossFunctionIsValid(scaled_loss, 1.792);
  }
}

TEST(LossFunction, LossFunctionWrapper) {
  // Initialization.
  HuberLoss loss_function1(1.0);
  LossFunctionWrapper loss_function_wrapper(new HuberLoss(1.0), TAKE_OWNERSHIP);

  double s = 0.862;
  double rho_gold[3];
  double rho[3];
  loss_function1.Evaluate(s, rho_gold);
  loss_function_wrapper.Evaluate(s, rho);
  for (int i = 0; i < 3; ++i) {
    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
  }

  // Resetting to a new loss function, taking ownership.
  HuberLoss loss_function2(0.5);
  loss_function_wrapper.Reset(new HuberLoss(0.5), TAKE_OWNERSHIP);
  loss_function_wrapper.Evaluate(s, rho);
  loss_function2.Evaluate(s, rho_gold);
  for (int i = 0; i < 3; ++i) {
    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
  }

  // Resetting without taking ownership.
  HuberLoss loss_function3(0.3);
  loss_function_wrapper.Reset(&loss_function3, DO_NOT_TAKE_OWNERSHIP);
  loss_function_wrapper.Evaluate(s, rho);
  loss_function3.Evaluate(s, rho_gold);
  for (int i = 0; i < 3; ++i) {
    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
  }

  // Resetting to NULL, taking ownership.
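  // A NULL wrapped loss is expected to behave like TrivialLoss, which is
  // what the comparisons against loss_function4 below verify.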
  TrivialLoss loss_function4;
  loss_function_wrapper.Reset(NULL, TAKE_OWNERSHIP);
  loss_function_wrapper.Evaluate(s, rho);
  loss_function4.Evaluate(s, rho_gold);
  for (int i = 0; i < 3; ++i) {
    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
  }

  // Resetting to NULL, without taking ownership.
  loss_function_wrapper.Reset(NULL, DO_NOT_TAKE_OWNERSHIP);
  loss_function_wrapper.Evaluate(s, rho);
  loss_function4.Evaluate(s, rho_gold);
  for (int i = 0; i < 3; ++i) {
    EXPECT_NEAR(rho[i], rho_gold[i], 1e-12);
  }
}

}  // namespace internal
}  // namespace ceres