blob: a156b8954fe2c04102d4552cba99d2797ffe078c [file] [log] [blame]
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// Tests shared across evaluators. The tests try all combinations of linear
// solver and num_eliminate_blocks (for schur-based solvers).
33
34#include "ceres/evaluator.h"
35
36#include <memory>
37#include "ceres/casts.h"
38#include "ceres/cost_function.h"
39#include "ceres/crs_matrix.h"
40#include "ceres/evaluator_test_utils.h"
41#include "ceres/internal/eigen.h"
42#include "ceres/local_parameterization.h"
43#include "ceres/problem_impl.h"
44#include "ceres/program.h"
45#include "ceres/sized_cost_function.h"
46#include "ceres/sparse_matrix.h"
47#include "ceres/stringprintf.h"
48#include "ceres/types.h"
49#include "gtest/gtest.h"
50
51namespace ceres {
52namespace internal {
53
54using std::string;
55using std::vector;
56
57// TODO(keir): Consider pushing this into a common test utils file.
58template <int kFactor, int kNumResiduals, int... Ns>
59class ParameterIgnoringCostFunction
60 : public SizedCostFunction<kNumResiduals, Ns...> {
61 typedef SizedCostFunction<kNumResiduals, Ns...> Base;
62
63 public:
64 explicit ParameterIgnoringCostFunction(bool succeeds = true)
65 : succeeds_(succeeds) {}
66
67 virtual bool Evaluate(double const* const* parameters,
68 double* residuals,
69 double** jacobians) const {
70 for (int i = 0; i < Base::num_residuals(); ++i) {
71 residuals[i] = i + 1;
72 }
73 if (jacobians) {
74 for (int k = 0; k < Base::parameter_block_sizes().size(); ++k) {
75 // The jacobians here are full sized, but they are transformed in the
76 // evaluator into the "local" jacobian. In the tests, the "subset
77 // constant" parameterization is used, which should pick out columns
78 // from these jacobians. Put values in the jacobian that make this
79 // obvious; in particular, make the jacobians like this:
80 //
81 // 1 2 3 4 ...
82 // 1 2 3 4 ... .* kFactor
83 // 1 2 3 4 ...
84 //
85 // where the multiplication by kFactor makes it easier to distinguish
86 // between Jacobians of different residuals for the same parameter.
87 if (jacobians[k] != nullptr) {
88 MatrixRef jacobian(jacobians[k],
89 Base::num_residuals(),
90 Base::parameter_block_sizes()[k]);
91 for (int j = 0; j < Base::parameter_block_sizes()[k]; ++j) {
92 jacobian.col(j).setConstant(kFactor * (j + 1));
93 }
94 }
95 }
96 }
97 return succeeds_;
98 }
99
100 private:
101 bool succeeds_;
102};
103
104struct EvaluatorTestOptions {
105 EvaluatorTestOptions(LinearSolverType linear_solver_type,
106 int num_eliminate_blocks,
107 bool dynamic_sparsity = false)
108 : linear_solver_type(linear_solver_type),
109 num_eliminate_blocks(num_eliminate_blocks),
110 dynamic_sparsity(dynamic_sparsity) {}
111
112 LinearSolverType linear_solver_type;
113 int num_eliminate_blocks;
114 bool dynamic_sparsity;
115};
116
// Test fixture parameterized over EvaluatorTestOptions.  Each TEST_P below is
// instantiated once for every solver configuration listed in
// INSTANTIATE_TEST_CASE_P near the bottom of this file.
struct EvaluatorTest
    : public ::testing::TestWithParam<EvaluatorTestOptions> {
  // Creates an Evaluator for the given program using the solver type,
  // num_eliminate_blocks, and dynamic_sparsity from the current test
  // parameter.  The caller takes ownership of the returned evaluator.
  Evaluator* CreateEvaluator(Program* program) {
    // This program is straight from the ProblemImpl, and so has no index/offset
    // yet; compute it here as required by the evaluator implementations.
    program->SetParameterOffsetsAndIndex();

    if (VLOG_IS_ON(1)) {
      string report;
      StringAppendF(&report, "Creating evaluator with type: %d",
                    GetParam().linear_solver_type);
      if (GetParam().linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
        StringAppendF(&report, ", dynamic_sparsity: %d",
                      GetParam().dynamic_sparsity);
      }
      StringAppendF(&report, " and num_eliminate_blocks: %d",
                    GetParam().num_eliminate_blocks);
      VLOG(1) << report;
    }
    Evaluator::Options options;
    options.linear_solver_type = GetParam().linear_solver_type;
    options.num_eliminate_blocks = GetParam().num_eliminate_blocks;
    options.dynamic_sparsity = GetParam().dynamic_sparsity;
    options.context = problem.context();
    string error;
    return Evaluator::Create(options, program, &error);
  }

  // Evaluates the problem and compares cost, residuals, gradient, and
  // jacobian against the expected values.  Passing nullptr for any of the
  // expected_* arrays skips both requesting and checking that quantity.
  void EvaluateAndCompare(ProblemImpl *problem,
                          int expected_num_rows,
                          int expected_num_cols,
                          double expected_cost,
                          const double* expected_residuals,
                          const double* expected_gradient,
                          const double* expected_jacobian) {
    std::unique_ptr<Evaluator> evaluator(
        CreateEvaluator(problem->mutable_program()));
    int num_residuals = expected_num_rows;
    int num_parameters = expected_num_cols;

    // Sentinel values make it obvious if Evaluate() fails to overwrite an
    // output it was asked to produce.
    double cost = -1;

    Vector residuals(num_residuals);
    residuals.setConstant(-2000);

    Vector gradient(num_parameters);
    gradient.setConstant(-3000);

    std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());

    ASSERT_EQ(expected_num_rows, evaluator->NumResiduals());
    ASSERT_EQ(expected_num_cols, evaluator->NumEffectiveParameters());
    ASSERT_EQ(expected_num_rows, jacobian->num_rows());
    ASSERT_EQ(expected_num_cols, jacobian->num_cols());

    // The cost functions in these tests ignore the parameter values, so the
    // state vector's contents don't matter; only its size does.
    vector<double> state(evaluator->NumParameters());

    ASSERT_TRUE(evaluator->Evaluate(
          &state[0],
          &cost,
          expected_residuals != nullptr ? &residuals[0] : nullptr,
          expected_gradient != nullptr ? &gradient[0] : nullptr,
          expected_jacobian != nullptr ? jacobian.get() : nullptr));

    Matrix actual_jacobian;
    if (expected_jacobian != nullptr) {
      jacobian->ToDenseMatrix(&actual_jacobian);
    }

    CompareEvaluations(expected_num_rows,
                       expected_num_cols,
                       expected_cost,
                       expected_residuals,
                       expected_gradient,
                       expected_jacobian,
                       cost,
                       &residuals[0],
                       &gradient[0],
                       actual_jacobian.data());
  }

  // Try all combinations of parameters for the evaluator.  The bits of i
  // select which outputs are requested on each pass: bit 0 = residuals,
  // bit 1 = gradient, bit 2 = jacobian.
  void CheckAllEvaluationCombinations(const ExpectedEvaluation &expected) {
    for (int i = 0; i < 8; ++i) {
      EvaluateAndCompare(&problem,
                         expected.num_rows,
                         expected.num_cols,
                         expected.cost,
                         (i & 1) ? expected.residuals : nullptr,
                         (i & 2) ? expected.gradient : nullptr,
                         (i & 4) ? expected.jacobian : nullptr);
    }
  }

  // Parameter blocks shared by the tests below.  The values are ignored
  // completely by the cost function; only the block sizes (2, 3, 4) matter.
  double x[2];
  double y[3];
  double z[4];

  ProblemImpl problem;
};
218
219void SetSparseMatrixConstant(SparseMatrix* sparse_matrix, double value) {
220 VectorRef(sparse_matrix->mutable_values(),
221 sparse_matrix->num_nonzeros()).setConstant(value);
222}
223
// A single residual block f(x, y, z) in R^3 over three parameter blocks of
// sizes 2, 3, and 4 (9 effective parameters total).
TEST_P(EvaluatorTest, SingleResidualProblem) {
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
                           nullptr,
                           x, y, z);

  ExpectedEvaluation expected = {
    // Rows/columns
    3, 9,
    // Cost: 1/2 * (1^2 + 2^2 + 3^2) = 7.
    7.0,
    // Residuals
    { 1.0, 2.0, 3.0 },
    // Gradient: J^T r.  Column j of J is constant (j + 1), so the gradient
    // entry for that column is (j + 1) * (1 + 2 + 3) = 6 * (j + 1).
    { 6.0, 12.0,              // x
      6.0, 12.0, 18.0,        // y
      6.0, 12.0, 18.0, 24.0,  // z
    },
    // Jacobian
    //   x          y             z
    { 1, 2,   1, 2, 3,   1, 2, 3, 4,
      1, 2,   1, 2, 3,   1, 2, 3, 4,
      1, 2,   1, 2, 3,   1, 2, 3, 4
    }
  };
  CheckAllEvaluationCombinations(expected);
}
250
// Same problem as SingleResidualProblem, but with the cost function's
// parameter order (z, y, x) reversed relative to the program's parameter
// order (x, y, z).  The expected results are identical.
TEST_P(EvaluatorTest, SingleResidualProblemWithPermutedParameters) {
  // Add the parameters in explicit order to force the ordering in the program.
  problem.AddParameterBlock(x, 2);
  problem.AddParameterBlock(y, 3);
  problem.AddParameterBlock(z, 4);

  // Then use a cost function which is similar to the others, but swap around
  // the ordering of the parameters to the cost function. This shouldn't affect
  // the jacobian evaluation, but requires explicit handling in the evaluators.
  // At one point the compressed row evaluator had a bug that went undetected
  // for a long time, since by chance most users added parameters to the problem
  // in the same order that they occurred as parameters to a cost function.
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 4, 3, 2>,
                           nullptr,
                           z, y, x);

  ExpectedEvaluation expected = {
    // Rows/columns
    3, 9,
    // Cost: 1/2 * (1^2 + 2^2 + 3^2) = 7.
    7.0,
    // Residuals
    { 1.0, 2.0, 3.0 },
    // Gradient, in program order (x, y, z) even though the cost function
    // receives the blocks as (z, y, x).
    { 6.0, 12.0,              // x
      6.0, 12.0, 18.0,        // y
      6.0, 12.0, 18.0, 24.0,  // z
    },
    // Jacobian
    //   x          y             z
    { 1, 2,   1, 2, 3,   1, 2, 3, 4,
      1, 2,   1, 2, 3,   1, 2, 3, 4,
      1, 2,   1, 2, 3,   1, 2, 3, 4
    }
  };
  CheckAllEvaluationCombinations(expected);
}
288
// A single residual block plus extra parameter blocks (a, b, c, d) that no
// residual uses; their gradient and jacobian entries must come out as zero.
TEST_P(EvaluatorTest, SingleResidualProblemWithNuisanceParameters) {
  // These parameters are not used.
  double a[2];
  double b[1];
  double c[1];
  double d[3];

  // Add the parameters in a mixed order so the Jacobian is "checkered" with the
  // values from the other parameters.
  problem.AddParameterBlock(a, 2);
  problem.AddParameterBlock(x, 2);
  problem.AddParameterBlock(b, 1);
  problem.AddParameterBlock(y, 3);
  problem.AddParameterBlock(c, 1);
  problem.AddParameterBlock(z, 4);
  problem.AddParameterBlock(d, 3);

  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 3, 2, 3, 4>,
                           nullptr,
                           x, y, z);

  ExpectedEvaluation expected = {
    // Rows/columns: 3 residuals, 16 = 2+2+1+3+1+4+3 effective parameters.
    3, 16,
    // Cost: 1/2 * (1^2 + 2^2 + 3^2) = 7.
    7.0,
    // Residuals
    { 1.0, 2.0, 3.0 },
    // Gradient; the nuisance blocks a, b, c, d get exact zeros.
    { 0.0, 0.0,               // a
      6.0, 12.0,              // x
      0.0,                    // b
      6.0, 12.0, 18.0,        // y
      0.0,                    // c
      6.0, 12.0, 18.0, 24.0,  // z
      0.0, 0.0, 0.0,          // d
    },
    // Jacobian
    //    a    x   b    y    c     z     d
    { 0, 0,  1, 2,  0,  1, 2, 3,  0,  1, 2, 3, 4,  0, 0, 0,
      0, 0,  1, 2,  0,  1, 2, 3,  0,  1, 2, 3, 4,  0, 0, 0,
      0, 0,  1, 2,  0,  1, 2, 3,  0,  1, 2, 3, 4,  0, 0, 0
    }
  };
  CheckAllEvaluationCombinations(expected);
}
335
// Three residual blocks of different sizes and kFactors over overlapping
// parameter pairs: f(x, y), g(x, z), h(y, z).  Exercises accumulation of the
// gradient and block placement in the assembled jacobian.
TEST_P(EvaluatorTest, MultipleResidualProblem) {
  // Add the parameters in explicit order to force the ordering in the program.
  problem.AddParameterBlock(x, 2);
  problem.AddParameterBlock(y, 3);
  problem.AddParameterBlock(z, 4);

  // f(x, y) in R^2; kFactor = 1.
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
                           nullptr,
                           x, y);

  // g(x, z) in R^3; kFactor = 2.
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
                           nullptr,
                           x, z);

  // h(y, z) in R^4; kFactor = 3.
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
                           nullptr,
                           y, z);

  ExpectedEvaluation expected = {
    // Rows/columns: 2 + 3 + 4 residuals, 9 effective parameters.
    9, 9,
    // Cost
    //   f     g           h
    ( 1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
    // Residuals
    { 1.0, 2.0,           // f
      1.0, 2.0, 3.0,      // g
      1.0, 2.0, 3.0, 4.0  // h
    },
    // Gradient: per-block J^T r contributions summed across residual blocks.
    { 15.0, 30.0,               // x: 3*(1,2) from f + 12*(1,2) from g
      33.0, 66.0, 99.0,         // y: 3*(1,2,3) from f + 30*(1,2,3) from h
      42.0, 84.0, 126.0, 168.0  // z: 12*(1,..,4) from g + 30*(1,..,4) from h
    },
    // Jacobian
    //                   x        y           z
    { /* f(x, y) */ 1, 2,   1, 2, 3,   0, 0, 0, 0,
                    1, 2,   1, 2, 3,   0, 0, 0, 0,

      /* g(x, z) */ 2, 4,   0, 0, 0,   2, 4, 6, 8,
                    2, 4,   0, 0, 0,   2, 4, 6, 8,
                    2, 4,   0, 0, 0,   2, 4, 6, 8,

      /* h(y, z) */ 0, 0,   3, 6, 9,   3, 6, 9, 12,
                    0, 0,   3, 6, 9,   3, 6, 9, 12,
                    0, 0,   3, 6, 9,   3, 6, 9, 12,
                    0, 0,   3, 6, 9,   3, 6, 9, 12
    }
  };
  CheckAllEvaluationCombinations(expected);
}
390
// Same three residual blocks as MultipleResidualProblem, but with subset
// parameterizations holding y[0] and z[1] constant.  The evaluator must drop
// the corresponding columns, shrinking the effective parameter count 9 -> 7.
TEST_P(EvaluatorTest, MultipleResidualsWithLocalParameterizations) {
  // Add the parameters in explicit order to force the ordering in the program.
  problem.AddParameterBlock(x, 2);

  // Fix y's first dimension.
  vector<int> y_fixed;
  y_fixed.push_back(0);
  problem.AddParameterBlock(y, 3, new SubsetParameterization(3, y_fixed));

  // Fix z's second dimension.
  vector<int> z_fixed;
  z_fixed.push_back(1);
  problem.AddParameterBlock(z, 4, new SubsetParameterization(4, z_fixed));

  // f(x, y) in R^2
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
                           nullptr,
                           x, y);

  // g(x, z) in R^3
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
                           nullptr,
                           x, z);

  // h(y, z) in R^4
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
                           nullptr,
                           y, z);

  ExpectedEvaluation expected = {
    // Rows/columns: y and z each lose one effective column.
    9, 7,
    // Cost
    //   f     g           h
    ( 1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
    // Residuals
    { 1.0, 2.0,           // f
      1.0, 2.0, 3.0,      // g
      1.0, 2.0, 3.0, 4.0  // h
    },
    // Gradient; same values as the unconstrained test with the entries for
    // the fixed dimensions (y[0], z[1]) removed.
    { 15.0, 30.0,        // x
      66.0, 99.0,        // y (column 0 fixed)
      42.0, 126.0, 168.0 // z (column 1 fixed)
    },
    // Jacobian; columns for y[0] and z[1] are dropped, so the surviving
    // y columns carry values 2, 3 and the z columns carry 1, 3, 4 (times
    // each block's kFactor).
    //                   x      y         z
    { /* f(x, y) */ 1, 2,   2, 3,   0, 0, 0,
                    1, 2,   2, 3,   0, 0, 0,

      /* g(x, z) */ 2, 4,   0, 0,   2, 6, 8,
                    2, 4,   0, 0,   2, 6, 8,
                    2, 4,   0, 0,   2, 6, 8,

      /* h(y, z) */ 0, 0,   6, 9,   3, 9, 12,
                    0, 0,   6, 9,   3, 9, 12,
                    0, 0,   6, 9,   3, 9, 12,
                    0, 0,   6, 9,   3, 9, 12
    }
  };
  CheckAllEvaluationCombinations(expected);
}
453
// Same three residual blocks, but with the entire z block held constant.
// The reduced program drops z, so the jacobian shrinks to 5 columns even
// though g and h still read z's values during evaluation.
TEST_P(EvaluatorTest, MultipleResidualProblemWithSomeConstantParameters) {
  // The values are ignored completely by the cost function.
  // NOTE: these locals intentionally shadow the fixture's x, y, z members so
  // that the constant block lives in this test's scope.
  double x[2];
  double y[3];
  double z[4];

  // Add the parameters in explicit order to force the ordering in the program.
  problem.AddParameterBlock(x, 2);
  problem.AddParameterBlock(y, 3);
  problem.AddParameterBlock(z, 4);

  // f(x, y) in R^2
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<1, 2, 2, 3>,
                           nullptr,
                           x, y);

  // g(x, z) in R^3
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<2, 3, 2, 4>,
                           nullptr,
                           x, z);

  // h(y, z) in R^4
  problem.AddResidualBlock(new ParameterIgnoringCostFunction<3, 4, 3, 4>,
                           nullptr,
                           y, z);

  // For this test, "z" is constant.
  problem.SetParameterBlockConstant(z);

  // Create the reduced program which is missing the fixed "z" variable.
  // Normally, the preprocessing of the program that happens in solver_impl
  // takes care of this, but we don't want to invoke the solver here.
  Program reduced_program;
  vector<ParameterBlock*>* parameter_blocks =
      problem.mutable_program()->mutable_parameter_blocks();

  // "z" is the last parameter; save it for later and pop it off temporarily.
  // Note that "z" will still get read during evaluation, so it cannot be
  // deleted at this point.
  ParameterBlock* parameter_block_z = parameter_blocks->back();
  parameter_blocks->pop_back();

  ExpectedEvaluation expected = {
    // Rows/columns: all 9 residuals remain, but only x and y contribute
    // effective parameters (2 + 3 = 5).
    9, 5,
    // Cost
    //   f     g           h
    ( 1 + 4 + 1 + 4 + 9 + 1 + 4 + 9 + 16) / 2.0,
    // Residuals
    { 1.0, 2.0,           // f
      1.0, 2.0, 3.0,      // g
      1.0, 2.0, 3.0, 4.0  // h
    },
    // Gradient; no entries for the constant block z.
    { 15.0, 30.0,        // x
      33.0, 66.0, 99.0,  // y
    },
    // Jacobian; the z columns are gone entirely.
    //                   x        y
    { /* f(x, y) */ 1, 2,   1, 2, 3,
                    1, 2,   1, 2, 3,

      /* g(x, z) */ 2, 4,   0, 0, 0,
                    2, 4,   0, 0, 0,
                    2, 4,   0, 0, 0,

      /* h(y, z) */ 0, 0,   3, 6, 9,
                    0, 0,   3, 6, 9,
                    0, 0,   3, 6, 9,
                    0, 0,   3, 6, 9
    }
  };
  CheckAllEvaluationCombinations(expected);

  // Restore parameter block z, so it will get freed in a consistent way.
  parameter_blocks->push_back(parameter_block_z);
}
531
532TEST_P(EvaluatorTest, EvaluatorAbortsForResidualsThatFailToEvaluate) {
533 // Switch the return value to failure.
534 problem.AddResidualBlock(
535 new ParameterIgnoringCostFunction<20, 3, 2, 3, 4>(false),
536 nullptr,
537 x,
538 y,
539 z);
540
541 // The values are ignored.
542 double state[9];
543
544 std::unique_ptr<Evaluator> evaluator(
545 CreateEvaluator(problem.mutable_program()));
546 std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
547 double cost;
548 EXPECT_FALSE(evaluator->Evaluate(state, &cost, nullptr, nullptr, nullptr));
549}
550
// In the pairs, the first argument is the linear solver type, and the second
// argument is num_eliminate_blocks. Changing the num_eliminate_blocks only
// makes sense for the schur-based solvers.
//
// Try all values of num_eliminate_blocks that make sense given that in the
// tests a maximum of 4 parameter blocks are present.
//
// NOTE(review): newer googletest deprecates INSTANTIATE_TEST_CASE_P in favor
// of INSTANTIATE_TEST_SUITE_P — confirm the gtest version before renaming.
INSTANTIATE_TEST_CASE_P(
    LinearSolvers,
    EvaluatorTest,
    ::testing::Values(EvaluatorTestOptions(DENSE_QR, 0),
                      EvaluatorTestOptions(DENSE_SCHUR, 0),
                      EvaluatorTestOptions(DENSE_SCHUR, 1),
                      EvaluatorTestOptions(DENSE_SCHUR, 2),
                      EvaluatorTestOptions(DENSE_SCHUR, 3),
                      EvaluatorTestOptions(DENSE_SCHUR, 4),
                      EvaluatorTestOptions(SPARSE_SCHUR, 0),
                      EvaluatorTestOptions(SPARSE_SCHUR, 1),
                      EvaluatorTestOptions(SPARSE_SCHUR, 2),
                      EvaluatorTestOptions(SPARSE_SCHUR, 3),
                      EvaluatorTestOptions(SPARSE_SCHUR, 4),
                      EvaluatorTestOptions(ITERATIVE_SCHUR, 0),
                      EvaluatorTestOptions(ITERATIVE_SCHUR, 1),
                      EvaluatorTestOptions(ITERATIVE_SCHUR, 2),
                      EvaluatorTestOptions(ITERATIVE_SCHUR, 3),
                      EvaluatorTestOptions(ITERATIVE_SCHUR, 4),
                      EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, false),
                      EvaluatorTestOptions(SPARSE_NORMAL_CHOLESKY, 0, true)));
578
579// Simple cost function used to check if the evaluator is sensitive to
580// state changes.
581class ParameterSensitiveCostFunction : public SizedCostFunction<2, 2> {
582 public:
583 virtual bool Evaluate(double const* const* parameters,
584 double* residuals,
585 double** jacobians) const {
586 double x1 = parameters[0][0];
587 double x2 = parameters[0][1];
588 residuals[0] = x1 * x1;
589 residuals[1] = x2 * x2;
590
591 if (jacobians != nullptr) {
592 double* jacobian = jacobians[0];
593 if (jacobian != nullptr) {
594 jacobian[0] = 2.0 * x1;
595 jacobian[1] = 0.0;
596 jacobian[2] = 0.0;
597 jacobian[3] = 2.0 * x2;
598 }
599 }
600 return true;
601 }
602};
603
// Verifies that Evaluate() uses the state vector passed to it rather than the
// parameter values stored in the problem: the problem is built at (1, 1) but
// evaluated at (2, 3), and all outputs must reflect (2, 3).
TEST(Evaluator, EvaluatorRespectsParameterChanges) {
  ProblemImpl problem;

  double x[2];
  x[0] = 1.0;
  x[1] = 1.0;

  problem.AddResidualBlock(new ParameterSensitiveCostFunction(), nullptr, x);
  Program* program = problem.mutable_program();
  program->SetParameterOffsetsAndIndex();

  // Build a DENSE_QR evaluator directly rather than via the fixture, since
  // this test is not parameterized over solver types.
  Evaluator::Options options;
  options.linear_solver_type = DENSE_QR;
  options.num_eliminate_blocks = 0;
  options.context = problem.context();
  string error;
  std::unique_ptr<Evaluator> evaluator(
      Evaluator::Create(options, program, &error));
  std::unique_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());

  ASSERT_EQ(2, jacobian->num_rows());
  ASSERT_EQ(2, jacobian->num_cols());

  double state[2];
  state[0] = 2.0;
  state[1] = 3.0;

  // The original state of a residual block comes from the user's
  // state. So the original state is 1.0, 1.0, and the only way we get
  // the 2.0, 3.0 results in the following tests is if it respects the
  // values in the state vector.

  // Cost only; no residuals and no jacobian.
  // cost = 1/2 * (4^2 + 9^2) = 48.5.
  {
    double cost = -1;
    ASSERT_TRUE(evaluator->Evaluate(state, &cost, nullptr, nullptr, nullptr));
    EXPECT_EQ(48.5, cost);
  }

  // Cost and residuals, no jacobian.
  {
    double cost = -1;
    double residuals[2] = {-2, -2};  // Sentinels; must be overwritten.
    ASSERT_TRUE(evaluator->Evaluate(state, &cost, residuals, nullptr, nullptr));
    EXPECT_EQ(48.5, cost);
    EXPECT_EQ(4, residuals[0]);
    EXPECT_EQ(9, residuals[1]);
  }

  // Cost, residuals, and jacobian.
  {
    double cost = -1;
    double residuals[2] = {-2, -2};
    // Pre-fill the jacobian with -1 so stale values can't pass the check.
    SetSparseMatrixConstant(jacobian.get(), -1);
    ASSERT_TRUE(
        evaluator->Evaluate(state, &cost, residuals, nullptr, jacobian.get()));
    EXPECT_EQ(48.5, cost);
    EXPECT_EQ(4, residuals[0]);
    EXPECT_EQ(9, residuals[1]);
    Matrix actual_jacobian;
    jacobian->ToDenseMatrix(&actual_jacobian);

    Matrix expected_jacobian(2, 2);
    expected_jacobian << 2 * state[0], 0, 0, 2 * state[1];

    EXPECT_TRUE((actual_jacobian.array() == expected_jacobian.array()).all())
        << "Actual:\n"
        << actual_jacobian << "\nExpected:\n"
        << expected_jacobian;
  }
}
675
676} // namespace internal
677} // namespace ceres