blob: 3b7bfd2981677b935127d10a243f8001cd6aedcc [file] [log] [blame]
Austin Schuh70cc9552019-01-21 19:46:48 -08001// Ceres Solver - A fast non-linear least squares minimizer
Austin Schuh3de38b02024-06-25 18:25:10 -07002// Copyright 2023 Google Inc. All rights reserved.
Austin Schuh70cc9552019-01-21 19:46:48 -08003// http://ceres-solver.org/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11// this list of conditions and the following disclaimer in the documentation
12// and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14// used to endorse or promote products derived from this software without
15// specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: sameeragarwal@google.com (Sameer Agarwal)
30
#include "ceres/iterative_refiner.h"

#include <limits>
#include <string>
#include <utility>

#include "Eigen/Dense"
#include "ceres/dense_cholesky.h"
#include "ceres/internal/eigen.h"
#include "ceres/sparse_cholesky.h"
#include "ceres/sparse_matrix.h"
#include "glog/logging.h"
#include "gtest/gtest.h"
42
Austin Schuh3de38b02024-06-25 18:25:10 -070043namespace ceres::internal {
Austin Schuh70cc9552019-01-21 19:46:48 -080044
// Macros to help us define virtual methods which we do not expect to
// use/call in this test. Each expands to an inline function body that
// aborts via LOG(FATAL) if the method is ever invoked; the _WITH_RETURN
// variant additionally provides an (unreachable) return value so the
// function type-checks.
#define DO_NOT_CALL \
  { LOG(FATAL) << "DO NOT CALL"; }
#define DO_NOT_CALL_WITH_RETURN(x) \
  {                                \
    LOG(FATAL) << "DO NOT CALL";   \
    return x;                      \
  }
54
55// A fake SparseMatrix, which uses an Eigen matrix to do the real work.
56class FakeSparseMatrix : public SparseMatrix {
57 public:
Austin Schuh3de38b02024-06-25 18:25:10 -070058 explicit FakeSparseMatrix(Matrix m) : m_(std::move(m)) {}
Austin Schuh70cc9552019-01-21 19:46:48 -080059
60 // y += Ax
Austin Schuh3de38b02024-06-25 18:25:10 -070061 void RightMultiplyAndAccumulate(const double* x, double* y) const final {
Austin Schuh70cc9552019-01-21 19:46:48 -080062 VectorRef(y, m_.cols()) += m_ * ConstVectorRef(x, m_.cols());
63 }
64 // y += A'x
Austin Schuh3de38b02024-06-25 18:25:10 -070065 void LeftMultiplyAndAccumulate(const double* x, double* y) const final {
Austin Schuh70cc9552019-01-21 19:46:48 -080066 // We will assume that this is a symmetric matrix.
Austin Schuh3de38b02024-06-25 18:25:10 -070067 RightMultiplyAndAccumulate(x, y);
Austin Schuh70cc9552019-01-21 19:46:48 -080068 }
69
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080070 double* mutable_values() final { return m_.data(); }
71 const double* values() const final { return m_.data(); }
72 int num_rows() const final { return m_.cols(); }
73 int num_cols() const final { return m_.cols(); }
74 int num_nonzeros() const final { return m_.cols() * m_.cols(); }
Austin Schuh70cc9552019-01-21 19:46:48 -080075
76 // The following methods are not needed for tests in this file.
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080077 void SquaredColumnNorm(double* x) const final DO_NOT_CALL;
78 void ScaleColumns(const double* scale) final DO_NOT_CALL;
79 void SetZero() final DO_NOT_CALL;
80 void ToDenseMatrix(Matrix* dense_matrix) const final DO_NOT_CALL;
81 void ToTextFile(FILE* file) const final DO_NOT_CALL;
Austin Schuh70cc9552019-01-21 19:46:48 -080082
83 private:
84 Matrix m_;
85};
86
87// A fake SparseCholesky which uses Eigen's Cholesky factorization to
88// do the real work. The template parameter allows us to work in
89// doubles or floats, even though the source matrix is double.
90template <typename Scalar>
91class FakeSparseCholesky : public SparseCholesky {
92 public:
Austin Schuh3de38b02024-06-25 18:25:10 -070093 explicit FakeSparseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); }
94
95 LinearSolverTerminationType Solve(const double* rhs_ptr,
96 double* solution_ptr,
97 std::string* message) final {
98 const int num_cols = lhs_.cols();
99 VectorRef solution(solution_ptr, num_cols);
100 ConstVectorRef rhs(rhs_ptr, num_cols);
101 auto llt = lhs_.llt();
102 CHECK_EQ(llt.info(), Eigen::Success);
103 solution = llt.solve(rhs.cast<Scalar>()).template cast<double>();
104 return LinearSolverTerminationType::SUCCESS;
105 }
106
107 // The following methods are not needed for tests in this file.
108 CompressedRowSparseMatrix::StorageType StorageType() const final
109 DO_NOT_CALL_WITH_RETURN(
110 CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR);
111 LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
112 std::string* message) final
113 DO_NOT_CALL_WITH_RETURN(LinearSolverTerminationType::FAILURE);
114
115 private:
116 Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_;
117};
118
119// A fake DenseCholesky which uses Eigen's Cholesky factorization to
120// do the real work. The template parameter allows us to work in
121// doubles or floats, even though the source matrix is double.
122template <typename Scalar>
123class FakeDenseCholesky : public DenseCholesky {
124 public:
125 explicit FakeDenseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); }
Austin Schuh70cc9552019-01-21 19:46:48 -0800126
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800127 LinearSolverTerminationType Solve(const double* rhs_ptr,
128 double* solution_ptr,
129 std::string* message) final {
Austin Schuh70cc9552019-01-21 19:46:48 -0800130 const int num_cols = lhs_.cols();
131 VectorRef solution(solution_ptr, num_cols);
132 ConstVectorRef rhs(rhs_ptr, num_cols);
133 solution = lhs_.llt().solve(rhs.cast<Scalar>()).template cast<double>();
Austin Schuh3de38b02024-06-25 18:25:10 -0700134 return LinearSolverTerminationType::SUCCESS;
Austin Schuh70cc9552019-01-21 19:46:48 -0800135 }
136
Austin Schuh3de38b02024-06-25 18:25:10 -0700137 LinearSolverTerminationType Factorize(int num_cols,
138 double* lhs,
Austin Schuh1d1e6ea2020-12-23 21:56:30 -0800139 std::string* message) final
Austin Schuh3de38b02024-06-25 18:25:10 -0700140 DO_NOT_CALL_WITH_RETURN(LinearSolverTerminationType::FAILURE);
Austin Schuh70cc9552019-01-21 19:46:48 -0800141
142 private:
143 Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_;
144};
145
146#undef DO_NOT_CALL
147#undef DO_NOT_CALL_WITH_RETURN
148
Austin Schuh3de38b02024-06-25 18:25:10 -0700149class SparseIterativeRefinerTest : public ::testing::Test {
Austin Schuh70cc9552019-01-21 19:46:48 -0800150 public:
Austin Schuh3de38b02024-06-25 18:25:10 -0700151 void SetUp() override {
Austin Schuh70cc9552019-01-21 19:46:48 -0800152 num_cols_ = 5;
153 max_num_iterations_ = 30;
154 Matrix m(num_cols_, num_cols_);
155 m.setRandom();
156 lhs_ = m * m.transpose();
157 solution_.resize(num_cols_);
158 solution_.setRandom();
159 rhs_ = lhs_ * solution_;
160 };
161
162 protected:
163 int num_cols_;
164 int max_num_iterations_;
165 Matrix lhs_;
166 Vector rhs_, solution_;
167};
168
Austin Schuh3de38b02024-06-25 18:25:10 -0700169TEST_F(SparseIterativeRefinerTest,
170 RandomSolutionWithExactFactorizationConverges) {
Austin Schuh70cc9552019-01-21 19:46:48 -0800171 FakeSparseMatrix lhs(lhs_);
172 FakeSparseCholesky<double> sparse_cholesky(lhs_);
Austin Schuh3de38b02024-06-25 18:25:10 -0700173 SparseIterativeRefiner refiner(max_num_iterations_);
Austin Schuh70cc9552019-01-21 19:46:48 -0800174 Vector refined_solution(num_cols_);
175 refined_solution.setRandom();
176 refiner.Refine(lhs, rhs_.data(), &sparse_cholesky, refined_solution.data());
177 EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(),
178 0.0,
179 std::numeric_limits<double>::epsilon() * 10);
180}
181
Austin Schuh3de38b02024-06-25 18:25:10 -0700182TEST_F(SparseIterativeRefinerTest,
Austin Schuh70cc9552019-01-21 19:46:48 -0800183 RandomSolutionWithApproximationFactorizationConverges) {
184 FakeSparseMatrix lhs(lhs_);
185 // Use a single precision Cholesky factorization of the double
186 // precision matrix. This will give us an approximate factorization.
187 FakeSparseCholesky<float> sparse_cholesky(lhs_);
Austin Schuh3de38b02024-06-25 18:25:10 -0700188 SparseIterativeRefiner refiner(max_num_iterations_);
Austin Schuh70cc9552019-01-21 19:46:48 -0800189 Vector refined_solution(num_cols_);
190 refined_solution.setRandom();
191 refiner.Refine(lhs, rhs_.data(), &sparse_cholesky, refined_solution.data());
192 EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(),
193 0.0,
194 std::numeric_limits<double>::epsilon() * 10);
195}
196
Austin Schuh3de38b02024-06-25 18:25:10 -0700197class DenseIterativeRefinerTest : public ::testing::Test {
198 public:
199 void SetUp() override {
200 num_cols_ = 5;
201 max_num_iterations_ = 30;
202 Matrix m(num_cols_, num_cols_);
203 m.setRandom();
204 lhs_ = m * m.transpose();
205 solution_.resize(num_cols_);
206 solution_.setRandom();
207 rhs_ = lhs_ * solution_;
208 };
209
210 protected:
211 int num_cols_;
212 int max_num_iterations_;
213 Matrix lhs_;
214 Vector rhs_, solution_;
215};
216
217TEST_F(DenseIterativeRefinerTest,
218 RandomSolutionWithExactFactorizationConverges) {
219 Matrix lhs = lhs_;
220 FakeDenseCholesky<double> dense_cholesky(lhs);
221 DenseIterativeRefiner refiner(max_num_iterations_);
222 Vector refined_solution(num_cols_);
223 refined_solution.setRandom();
224 refiner.Refine(lhs.cols(),
225 lhs.data(),
226 rhs_.data(),
227 &dense_cholesky,
228 refined_solution.data());
229 EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(),
230 0.0,
231 std::numeric_limits<double>::epsilon() * 10);
232}
233
234TEST_F(DenseIterativeRefinerTest,
235 RandomSolutionWithApproximationFactorizationConverges) {
236 Matrix lhs = lhs_;
237 // Use a single precision Cholesky factorization of the double
238 // precision matrix. This will give us an approximate factorization.
239 FakeDenseCholesky<float> dense_cholesky(lhs_);
240 DenseIterativeRefiner refiner(max_num_iterations_);
241 Vector refined_solution(num_cols_);
242 refined_solution.setRandom();
243 refiner.Refine(lhs.cols(),
244 lhs.data(),
245 rhs_.data(),
246 &dense_cholesky,
247 refined_solution.data());
248 EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(),
249 0.0,
250 std::numeric_limits<double>::epsilon() * 10);
251}
252
253} // namespace ceres::internal