Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 1 | // Ceres Solver - A fast non-linear least squares minimizer |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 2 | // Copyright 2023 Google Inc. All rights reserved. |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 3 | // http://ceres-solver.org/ |
| 4 | // |
| 5 | // Redistribution and use in source and binary forms, with or without |
| 6 | // modification, are permitted provided that the following conditions are met: |
| 7 | // |
| 8 | // * Redistributions of source code must retain the above copyright notice, |
| 9 | // this list of conditions and the following disclaimer. |
| 10 | // * Redistributions in binary form must reproduce the above copyright notice, |
| 11 | // this list of conditions and the following disclaimer in the documentation |
| 12 | // and/or other materials provided with the distribution. |
| 13 | // * Neither the name of Google Inc. nor the names of its contributors may be |
| 14 | // used to endorse or promote products derived from this software without |
| 15 | // specific prior written permission. |
| 16 | // |
| 17 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| 18 | // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 19 | // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| 20 | // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
| 21 | // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| 22 | // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| 23 | // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| 24 | // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| 25 | // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| 26 | // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| 27 | // POSSIBILITY OF SUCH DAMAGE. |
| 28 | // |
| 29 | // Author: sameeragarwal@google.com (Sameer Agarwal) |
| 30 | |
| 31 | #include "ceres/iterative_refiner.h" |
| 32 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 33 | #include <utility> |
| 34 | |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 35 | #include "Eigen/Dense" |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 36 | #include "ceres/dense_cholesky.h" |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 37 | #include "ceres/internal/eigen.h" |
| 38 | #include "ceres/sparse_cholesky.h" |
| 39 | #include "ceres/sparse_matrix.h" |
| 40 | #include "glog/logging.h" |
| 41 | #include "gtest/gtest.h" |
| 42 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 43 | namespace ceres::internal { |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 44 | |
// Macros to help us define virtual methods which we do not expect to
// use/call in this test. Each expands to a method body that aborts the
// test via LOG(FATAL) if the method is ever invoked; the _WITH_RETURN
// variant additionally returns x so the body is well-formed for
// non-void methods (the return is never reached).
#define DO_NOT_CALL \
  { LOG(FATAL) << "DO NOT CALL"; }
#define DO_NOT_CALL_WITH_RETURN(x) \
  {                                \
    LOG(FATAL) << "DO NOT CALL";   \
    return x;                      \
  }
| 54 | |
// A fake SparseMatrix, which uses an Eigen matrix to do the real work.
// The tests in this file only use it as a symmetric square operator, so
// all dimension accessors are expressed in terms of m_.cols().
class FakeSparseMatrix : public SparseMatrix {
 public:
  explicit FakeSparseMatrix(Matrix m) : m_(std::move(m)) {}

  // y += Ax
  void RightMultiplyAndAccumulate(const double* x, double* y) const final {
    VectorRef(y, m_.cols()) += m_ * ConstVectorRef(x, m_.cols());
  }
  // y += A'x
  void LeftMultiplyAndAccumulate(const double* x, double* y) const final {
    // We will assume that this is a symmetric matrix, so A'x == Ax.
    RightMultiplyAndAccumulate(x, y);
  }

  double* mutable_values() final { return m_.data(); }
  const double* values() const final { return m_.data(); }
  // The matrix is square (and assumed symmetric), so num_rows()
  // deliberately reports cols() as well.
  int num_rows() const final { return m_.cols(); }
  int num_cols() const final { return m_.cols(); }
  // Every entry of the dense backing store counts as a "nonzero".
  int num_nonzeros() const final { return m_.cols() * m_.cols(); }

  // The following methods are not needed for tests in this file.
  void SquaredColumnNorm(double* x) const final DO_NOT_CALL;
  void ScaleColumns(const double* scale) final DO_NOT_CALL;
  void SetZero() final DO_NOT_CALL;
  void ToDenseMatrix(Matrix* dense_matrix) const final DO_NOT_CALL;
  void ToTextFile(FILE* file) const final DO_NOT_CALL;

 private:
  Matrix m_;
};
| 86 | |
| 87 | // A fake SparseCholesky which uses Eigen's Cholesky factorization to |
| 88 | // do the real work. The template parameter allows us to work in |
| 89 | // doubles or floats, even though the source matrix is double. |
| 90 | template <typename Scalar> |
| 91 | class FakeSparseCholesky : public SparseCholesky { |
| 92 | public: |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 93 | explicit FakeSparseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); } |
| 94 | |
| 95 | LinearSolverTerminationType Solve(const double* rhs_ptr, |
| 96 | double* solution_ptr, |
| 97 | std::string* message) final { |
| 98 | const int num_cols = lhs_.cols(); |
| 99 | VectorRef solution(solution_ptr, num_cols); |
| 100 | ConstVectorRef rhs(rhs_ptr, num_cols); |
| 101 | auto llt = lhs_.llt(); |
| 102 | CHECK_EQ(llt.info(), Eigen::Success); |
| 103 | solution = llt.solve(rhs.cast<Scalar>()).template cast<double>(); |
| 104 | return LinearSolverTerminationType::SUCCESS; |
| 105 | } |
| 106 | |
| 107 | // The following methods are not needed for tests in this file. |
| 108 | CompressedRowSparseMatrix::StorageType StorageType() const final |
| 109 | DO_NOT_CALL_WITH_RETURN( |
| 110 | CompressedRowSparseMatrix::StorageType::UPPER_TRIANGULAR); |
| 111 | LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs, |
| 112 | std::string* message) final |
| 113 | DO_NOT_CALL_WITH_RETURN(LinearSolverTerminationType::FAILURE); |
| 114 | |
| 115 | private: |
| 116 | Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_; |
| 117 | }; |
| 118 | |
| 119 | // A fake DenseCholesky which uses Eigen's Cholesky factorization to |
| 120 | // do the real work. The template parameter allows us to work in |
| 121 | // doubles or floats, even though the source matrix is double. |
| 122 | template <typename Scalar> |
| 123 | class FakeDenseCholesky : public DenseCholesky { |
| 124 | public: |
| 125 | explicit FakeDenseCholesky(const Matrix& lhs) { lhs_ = lhs.cast<Scalar>(); } |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 126 | |
Austin Schuh | 1d1e6ea | 2020-12-23 21:56:30 -0800 | [diff] [blame] | 127 | LinearSolverTerminationType Solve(const double* rhs_ptr, |
| 128 | double* solution_ptr, |
| 129 | std::string* message) final { |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 130 | const int num_cols = lhs_.cols(); |
| 131 | VectorRef solution(solution_ptr, num_cols); |
| 132 | ConstVectorRef rhs(rhs_ptr, num_cols); |
| 133 | solution = lhs_.llt().solve(rhs.cast<Scalar>()).template cast<double>(); |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 134 | return LinearSolverTerminationType::SUCCESS; |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 135 | } |
| 136 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 137 | LinearSolverTerminationType Factorize(int num_cols, |
| 138 | double* lhs, |
Austin Schuh | 1d1e6ea | 2020-12-23 21:56:30 -0800 | [diff] [blame] | 139 | std::string* message) final |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 140 | DO_NOT_CALL_WITH_RETURN(LinearSolverTerminationType::FAILURE); |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 141 | |
| 142 | private: |
| 143 | Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> lhs_; |
| 144 | }; |
| 145 | |
| 146 | #undef DO_NOT_CALL |
| 147 | #undef DO_NOT_CALL_WITH_RETURN |
| 148 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 149 | class SparseIterativeRefinerTest : public ::testing::Test { |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 150 | public: |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 151 | void SetUp() override { |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 152 | num_cols_ = 5; |
| 153 | max_num_iterations_ = 30; |
| 154 | Matrix m(num_cols_, num_cols_); |
| 155 | m.setRandom(); |
| 156 | lhs_ = m * m.transpose(); |
| 157 | solution_.resize(num_cols_); |
| 158 | solution_.setRandom(); |
| 159 | rhs_ = lhs_ * solution_; |
| 160 | }; |
| 161 | |
| 162 | protected: |
| 163 | int num_cols_; |
| 164 | int max_num_iterations_; |
| 165 | Matrix lhs_; |
| 166 | Vector rhs_, solution_; |
| 167 | }; |
| 168 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 169 | TEST_F(SparseIterativeRefinerTest, |
| 170 | RandomSolutionWithExactFactorizationConverges) { |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 171 | FakeSparseMatrix lhs(lhs_); |
| 172 | FakeSparseCholesky<double> sparse_cholesky(lhs_); |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 173 | SparseIterativeRefiner refiner(max_num_iterations_); |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 174 | Vector refined_solution(num_cols_); |
| 175 | refined_solution.setRandom(); |
| 176 | refiner.Refine(lhs, rhs_.data(), &sparse_cholesky, refined_solution.data()); |
| 177 | EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(), |
| 178 | 0.0, |
| 179 | std::numeric_limits<double>::epsilon() * 10); |
| 180 | } |
| 181 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 182 | TEST_F(SparseIterativeRefinerTest, |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 183 | RandomSolutionWithApproximationFactorizationConverges) { |
| 184 | FakeSparseMatrix lhs(lhs_); |
| 185 | // Use a single precision Cholesky factorization of the double |
| 186 | // precision matrix. This will give us an approximate factorization. |
| 187 | FakeSparseCholesky<float> sparse_cholesky(lhs_); |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 188 | SparseIterativeRefiner refiner(max_num_iterations_); |
Austin Schuh | 70cc955 | 2019-01-21 19:46:48 -0800 | [diff] [blame] | 189 | Vector refined_solution(num_cols_); |
| 190 | refined_solution.setRandom(); |
| 191 | refiner.Refine(lhs, rhs_.data(), &sparse_cholesky, refined_solution.data()); |
| 192 | EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(), |
| 193 | 0.0, |
| 194 | std::numeric_limits<double>::epsilon() * 10); |
| 195 | } |
| 196 | |
Austin Schuh | 3de38b0 | 2024-06-25 18:25:10 -0700 | [diff] [blame^] | 197 | class DenseIterativeRefinerTest : public ::testing::Test { |
| 198 | public: |
| 199 | void SetUp() override { |
| 200 | num_cols_ = 5; |
| 201 | max_num_iterations_ = 30; |
| 202 | Matrix m(num_cols_, num_cols_); |
| 203 | m.setRandom(); |
| 204 | lhs_ = m * m.transpose(); |
| 205 | solution_.resize(num_cols_); |
| 206 | solution_.setRandom(); |
| 207 | rhs_ = lhs_ * solution_; |
| 208 | }; |
| 209 | |
| 210 | protected: |
| 211 | int num_cols_; |
| 212 | int max_num_iterations_; |
| 213 | Matrix lhs_; |
| 214 | Vector rhs_, solution_; |
| 215 | }; |
| 216 | |
| 217 | TEST_F(DenseIterativeRefinerTest, |
| 218 | RandomSolutionWithExactFactorizationConverges) { |
| 219 | Matrix lhs = lhs_; |
| 220 | FakeDenseCholesky<double> dense_cholesky(lhs); |
| 221 | DenseIterativeRefiner refiner(max_num_iterations_); |
| 222 | Vector refined_solution(num_cols_); |
| 223 | refined_solution.setRandom(); |
| 224 | refiner.Refine(lhs.cols(), |
| 225 | lhs.data(), |
| 226 | rhs_.data(), |
| 227 | &dense_cholesky, |
| 228 | refined_solution.data()); |
| 229 | EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(), |
| 230 | 0.0, |
| 231 | std::numeric_limits<double>::epsilon() * 10); |
| 232 | } |
| 233 | |
| 234 | TEST_F(DenseIterativeRefinerTest, |
| 235 | RandomSolutionWithApproximationFactorizationConverges) { |
| 236 | Matrix lhs = lhs_; |
| 237 | // Use a single precision Cholesky factorization of the double |
| 238 | // precision matrix. This will give us an approximate factorization. |
| 239 | FakeDenseCholesky<float> dense_cholesky(lhs_); |
| 240 | DenseIterativeRefiner refiner(max_num_iterations_); |
| 241 | Vector refined_solution(num_cols_); |
| 242 | refined_solution.setRandom(); |
| 243 | refiner.Refine(lhs.cols(), |
| 244 | lhs.data(), |
| 245 | rhs_.data(), |
| 246 | &dense_cholesky, |
| 247 | refined_solution.data()); |
| 248 | EXPECT_NEAR((lhs_ * refined_solution - rhs_).norm(), |
| 249 | 0.0, |
| 250 | std::numeric_limits<double>::epsilon() * 10); |
| 251 | } |
| 252 | |
| 253 | } // namespace ceres::internal |