// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2023 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/block_sparse_matrix.h"

#include <algorithm>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <vector>

#include "ceres/block_structure.h"
#include "ceres/casts.h"
#include "ceres/context_impl.h"
#include "ceres/crs_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_least_squares_problems.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
#include "gtest/gtest.h"

namespace ceres {
namespace internal {

namespace {

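// Builds one of three small block sparse matrices with the dense layouts
// documented inline below; returns nullptr for an unrecognized id. These
// matrices are reused by the ToDenseMatrix and CRS conversion tests further
// down.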
std::unique_ptr<BlockSparseMatrix> CreateTestMatrixFromId(int id) {
  if (id == 0) {
    // Create the following block sparse matrix:
    // [ 1 2 0 0 0 0 ]
    // [ 3 4 0 0 0 0 ]
    // [ 0 0 5 6 7 0 ]
    // [ 0 0 8 9 10 0 ]
    CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
    bs->cols = {
        // Block size 2, position 0.
        Block(2, 0),
        // Block size 3, position 2.
        Block(3, 2),
        // Block size 1, position 5.
        Block(1, 5),
    };
    bs->rows = {CompressedRow(1), CompressedRow(1)};
    bs->rows[0].block = Block(2, 0);
    bs->rows[0].cells = {Cell(0, 0)};

    bs->rows[1].block = Block(2, 2);
    bs->rows[1].cells = {Cell(1, 4)};
    auto m = std::make_unique<BlockSparseMatrix>(bs);
    EXPECT_NE(m, nullptr);
    EXPECT_EQ(m->num_rows(), 4);
    EXPECT_EQ(m->num_cols(), 6);
    EXPECT_EQ(m->num_nonzeros(), 10);
    double* values = m->mutable_values();
    for (int i = 0; i < 10; ++i) {
      values[i] = i + 1;
    }
    return m;
  } else if (id == 1) {
    // Create the following block sparse matrix:
    // [ 1 2 0 5 6 0 ]
    // [ 3 4 0 7 8 0 ]
    // [ 0 0 9 0 0 0 ]
    CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
    bs->cols = {
        // Block size 2, position 0.
        Block(2, 0),
        // Block size 1, position 2.
        Block(1, 2),
        // Block size 2, position 3.
        Block(2, 3),
        // Block size 1, position 5.
        Block(1, 5),
    };
    bs->rows = {CompressedRow(2), CompressedRow(1)};
    bs->rows[0].block = Block(2, 0);
    bs->rows[0].cells = {Cell(0, 0), Cell(2, 4)};

    bs->rows[1].block = Block(1, 2);
    bs->rows[1].cells = {Cell(1, 8)};
    auto m = std::make_unique<BlockSparseMatrix>(bs);
    EXPECT_NE(m, nullptr);
    EXPECT_EQ(m->num_rows(), 3);
    EXPECT_EQ(m->num_cols(), 6);
    EXPECT_EQ(m->num_nonzeros(), 9);
    double* values = m->mutable_values();
    for (int i = 0; i < 9; ++i) {
      values[i] = i + 1;
    }
    return m;
  } else if (id == 2) {
    // Create the following block sparse matrix:
    // [ 1 2 0 | 6 7 0 ]
    // [ 3 4 0 | 8 9 0 ]
    // [ 0 0 5 | 0 0 10]
    // with the cells of the left submatrix preceding the cells of the right
    // submatrix in the values array.
    CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
    bs->cols = {
        // Block size 2, position 0.
        Block(2, 0),
        // Block size 1, position 2.
        Block(1, 2),
        // Block size 2, position 3.
        Block(2, 3),
        // Block size 1, position 5.
        Block(1, 5),
    };
    bs->rows = {CompressedRow(2), CompressedRow(1)};
    bs->rows[0].block = Block(2, 0);
    bs->rows[0].cells = {Cell(0, 0), Cell(2, 5)};

    bs->rows[1].block = Block(1, 2);
    bs->rows[1].cells = {Cell(1, 4), Cell(3, 9)};
    auto m = std::make_unique<BlockSparseMatrix>(bs);
    EXPECT_NE(m, nullptr);
    EXPECT_EQ(m->num_rows(), 3);
    EXPECT_EQ(m->num_cols(), 6);
    EXPECT_EQ(m->num_nonzeros(), 10);
    double* values = m->mutable_values();
    for (int i = 0; i < 10; ++i) {
      values[i] = i + 1;
    }
    return m;
  }
  return nullptr;
}

}  // namespace

const int kNumThreads = 4;

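// a_ and b_ hold the same test matrix stored as a BlockSparseMatrix
// (problem id 2) and as a TripletSparseMatrix (problem id 1); most tests
// compare results computed with both representations. c_ is a larger random
// block sparse matrix used by the parallel and column-scaling tests.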
class BlockSparseMatrixTest : public ::testing::Test {
 protected:
  void SetUp() final {
    std::unique_ptr<LinearLeastSquaresProblem> problem =
        CreateLinearLeastSquaresProblemFromId(2);
    CHECK(problem != nullptr);
    a_.reset(down_cast<BlockSparseMatrix*>(problem->A.release()));

    problem = CreateLinearLeastSquaresProblemFromId(1);
    CHECK(problem != nullptr);
    b_.reset(down_cast<TripletSparseMatrix*>(problem->A.release()));

    CHECK_EQ(a_->num_rows(), b_->num_rows());
    CHECK_EQ(a_->num_cols(), b_->num_cols());
    CHECK_EQ(a_->num_nonzeros(), b_->num_nonzeros());
    context_.EnsureMinimumThreads(kNumThreads);

    BlockSparseMatrix::RandomMatrixOptions options;
    options.num_row_blocks = 1000;
    options.min_row_block_size = 1;
    options.max_row_block_size = 8;
    options.num_col_blocks = 100;
    options.min_col_block_size = 1;
    options.max_col_block_size = 8;
    options.block_density = 0.05;

    std::mt19937 rng;
    c_ = BlockSparseMatrix::CreateRandomMatrix(options, rng);
  }

  std::unique_ptr<BlockSparseMatrix> a_;
  std::unique_ptr<TripletSparseMatrix> b_;
  std::unique_ptr<BlockSparseMatrix> c_;
  ContextImpl context_;
};

TEST_F(BlockSparseMatrixTest, SetZeroTest) {
  a_->SetZero();
  EXPECT_EQ(13, a_->num_nonzeros());
}

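// Compare the block-sparse and triplet representations by multiplying with
// each unit vector in turn.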
TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateTest) {
  Vector y_a = Vector::Zero(a_->num_rows());
  Vector y_b = Vector::Zero(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST_F(BlockSparseMatrixTest, RightMultiplyAndAccumulateParallelTest) {
  Vector y_0 = Vector::Random(a_->num_rows());
  Vector y_s = y_0;
  Vector y_p = y_0;

  Vector x = Vector::Random(a_->num_cols());
  a_->RightMultiplyAndAccumulate(x.data(), y_s.data());

  a_->RightMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);

  // The current parallel implementation is expected to be bit-exact with the
  // serial one.
  EXPECT_EQ((y_s - y_p).norm(), 0.);
}

TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateTest) {
  Vector y_a = Vector::Zero(a_->num_cols());
  Vector y_b = Vector::Zero(a_->num_cols());
  for (int i = 0; i < a_->num_rows(); ++i) {
    Vector x = Vector::Zero(a_->num_rows());
    x[i] = 1.0;
    a_->LeftMultiplyAndAccumulate(x.data(), y_a.data());
    b_->LeftMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST_F(BlockSparseMatrixTest, LeftMultiplyAndAccumulateParallelTest) {
  Vector y_0 = Vector::Random(a_->num_cols());
  Vector y_s = y_0;
  Vector y_p = y_0;

  Vector x = Vector::Random(a_->num_rows());
  a_->LeftMultiplyAndAccumulate(x.data(), y_s.data());

  a_->LeftMultiplyAndAccumulate(x.data(), y_p.data(), &context_, kNumThreads);

  // The parallel implementation of the left product traverses the matrix in a
  // different order, so the result may differ by rounding error.
  EXPECT_LT((y_s - y_p).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, SquaredColumnNormTest) {
  Vector y_a = Vector::Zero(a_->num_cols());
  Vector y_b = Vector::Zero(a_->num_cols());
  a_->SquaredColumnNorm(y_a.data());
  b_->SquaredColumnNorm(y_b.data());
  EXPECT_LT((y_a - y_b).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, SquaredColumnNormParallelTest) {
  Vector y_a = Vector::Zero(c_->num_cols());
  Vector y_b = Vector::Zero(c_->num_cols());
  c_->SquaredColumnNorm(y_a.data());

  c_->SquaredColumnNorm(y_b.data(), &context_, kNumThreads);
  EXPECT_LT((y_a - y_b).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, ScaleColumnsTest) {
  const Vector scale = Vector::Random(c_->num_cols()).cwiseAbs();

  const Vector x = Vector::Random(c_->num_rows());
  Vector y_expected = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
  y_expected.array() *= scale.array();

  c_->ScaleColumns(scale.data());
  Vector y_observed = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());

  EXPECT_GT(y_expected.norm(), 1.);
  EXPECT_LT((y_observed - y_expected).norm(), 1e-12 * y_expected.norm());
}

TEST_F(BlockSparseMatrixTest, ScaleColumnsParallelTest) {
  const Vector scale = Vector::Random(c_->num_cols()).cwiseAbs();

  const Vector x = Vector::Random(c_->num_rows());
  Vector y_expected = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_expected.data());
  y_expected.array() *= scale.array();

  c_->ScaleColumns(scale.data(), &context_, kNumThreads);
  Vector y_observed = Vector::Zero(c_->num_cols());
  c_->LeftMultiplyAndAccumulate(x.data(), y_observed.data());

  EXPECT_GT(y_expected.norm(), 1.);
  EXPECT_LT((y_observed - y_expected).norm(), 1e-12 * y_expected.norm());
}

TEST_F(BlockSparseMatrixTest, ToDenseMatrixTest) {
  Matrix m_a;
  Matrix m_b;
  a_->ToDenseMatrix(&m_a);
  b_->ToDenseMatrix(&m_b);
  EXPECT_LT((m_a - m_b).norm(), 1e-12);
}

TEST_F(BlockSparseMatrixTest, AppendRows) {
  std::unique_ptr<LinearLeastSquaresProblem> problem =
      CreateLinearLeastSquaresProblemFromId(2);
  std::unique_ptr<BlockSparseMatrix> m(
      down_cast<BlockSparseMatrix*>(problem->A.release()));
  a_->AppendRows(*m);
  EXPECT_EQ(a_->num_rows(), 2 * m->num_rows());
  EXPECT_EQ(a_->num_cols(), m->num_cols());

  problem = CreateLinearLeastSquaresProblemFromId(1);
  std::unique_ptr<TripletSparseMatrix> m2(
      down_cast<TripletSparseMatrix*>(problem->A.release()));
  b_->AppendRows(*m2);

  Vector y_a = Vector::Zero(a_->num_rows());
  Vector y_b = Vector::Zero(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    y_a.setZero();
    y_b.setZero();

    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST_F(BlockSparseMatrixTest, AppendDeleteRowsTransposedStructure) {
  auto problem = CreateLinearLeastSquaresProblemFromId(2);
  std::unique_ptr<BlockSparseMatrix> m(
      down_cast<BlockSparseMatrix*>(problem->A.release()));

  auto block_structure = a_->block_structure();

  // A sequence of AppendRows and DeleteRowBlocks operations is applied to the
  // matrix, and the regular and transpose block structures are compared after
  // each operation.
  //
  // Non-negative values encode the number of row blocks to delete; -1 encodes
  // appending the matrix m.
  const int num_row_blocks_to_delete[] = {0, -1, 1, -1, 8, -1, 10};
  for (auto& t : num_row_blocks_to_delete) {
    if (t == -1) {
      a_->AppendRows(*m);
    } else if (t > 0) {
      CHECK_GE(block_structure->rows.size(), t);
      a_->DeleteRowBlocks(t);
    }

    auto block_structure = a_->block_structure();
    auto transpose_block_structure = a_->transpose_block_structure();
    ASSERT_NE(block_structure, nullptr);
    ASSERT_NE(transpose_block_structure, nullptr);

    EXPECT_EQ(block_structure->rows.size(),
              transpose_block_structure->cols.size());
    EXPECT_EQ(block_structure->cols.size(),
              transpose_block_structure->rows.size());

    std::vector<int> nnz_col(transpose_block_structure->rows.size());
    for (int i = 0; i < block_structure->cols.size(); ++i) {
      EXPECT_EQ(block_structure->cols[i].position,
                transpose_block_structure->rows[i].block.position);
      const int col_size = transpose_block_structure->rows[i].block.size;
      EXPECT_EQ(block_structure->cols[i].size, col_size);

      for (auto& col_cell : transpose_block_structure->rows[i].cells) {
        int matches = 0;
        const int row_block_id = col_cell.block_id;
        nnz_col[i] +=
            col_size * transpose_block_structure->cols[row_block_id].size;
        for (auto& row_cell : block_structure->rows[row_block_id].cells) {
          if (row_cell.block_id != i) continue;
          EXPECT_EQ(row_cell.position, col_cell.position);
          ++matches;
        }
        EXPECT_EQ(matches, 1);
      }
      EXPECT_EQ(nnz_col[i], transpose_block_structure->rows[i].nnz);
      if (i > 0) {
        nnz_col[i] += nnz_col[i - 1];
      }
      EXPECT_EQ(nnz_col[i], transpose_block_structure->rows[i].cumulative_nnz);
    }
    for (int i = 0; i < block_structure->rows.size(); ++i) {
      EXPECT_EQ(block_structure->rows[i].block.position,
                transpose_block_structure->cols[i].position);
      EXPECT_EQ(block_structure->rows[i].block.size,
                transpose_block_structure->cols[i].size);

      for (auto& row_cell : block_structure->rows[i].cells) {
        int matches = 0;
        const int col_block_id = row_cell.block_id;
        for (auto& col_cell :
             transpose_block_structure->rows[col_block_id].cells) {
          if (col_cell.block_id != i) continue;
          EXPECT_EQ(col_cell.position, row_cell.position);
          ++matches;
        }
        EXPECT_EQ(matches, 1);
      }
    }
  }
}

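// Appends a block-diagonal matrix (built from a_'s column blocks) below a_,
// checks the products of the taller matrix, then deletes the appended row
// blocks and checks that a_ again matches b_.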
TEST_F(BlockSparseMatrixTest, AppendAndDeleteBlockDiagonalMatrix) {
  const std::vector<Block>& column_blocks = a_->block_structure()->cols;
  const int num_cols =
      column_blocks.back().size + column_blocks.back().position;
  Vector diagonal(num_cols);
  for (int i = 0; i < num_cols; ++i) {
    diagonal(i) = 2 * i * i + 1;
  }
  std::unique_ptr<BlockSparseMatrix> appendage(
      BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));

  a_->AppendRows(*appendage);
  Vector y_a, y_b;
  y_a.resize(a_->num_rows());
  y_b.resize(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    y_a.setZero();
    y_b.setZero();

    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a.head(b_->num_rows()) - y_b.head(b_->num_rows())).norm(),
              1e-12);
    Vector expected_tail = Vector::Zero(a_->num_cols());
    expected_tail(i) = diagonal(i);
    EXPECT_LT((y_a.tail(a_->num_cols()) - expected_tail).norm(), 1e-12);
  }

  a_->DeleteRowBlocks(column_blocks.size());
  EXPECT_EQ(a_->num_rows(), b_->num_rows());
  EXPECT_EQ(a_->num_cols(), b_->num_cols());

  y_a.resize(a_->num_rows());
  y_b.resize(a_->num_rows());
  for (int i = 0; i < a_->num_cols(); ++i) {
    Vector x = Vector::Zero(a_->num_cols());
    x[i] = 1.0;
    y_a.setZero();
    y_b.setZero();

    a_->RightMultiplyAndAccumulate(x.data(), y_a.data());
    b_->RightMultiplyAndAccumulate(x.data(), y_b.data());
    EXPECT_LT((y_a - y_b).norm(), 1e-12);
  }
}

TEST(BlockSparseMatrix, CreateDiagonalMatrix) {
  std::vector<Block> column_blocks;
  column_blocks.emplace_back(2, 0);
  column_blocks.emplace_back(1, 2);
  column_blocks.emplace_back(3, 3);
  const int num_cols =
      column_blocks.back().size + column_blocks.back().position;
  Vector diagonal(num_cols);
  for (int i = 0; i < num_cols; ++i) {
    diagonal(i) = 2 * i * i + 1;
  }

  std::unique_ptr<BlockSparseMatrix> m(
      BlockSparseMatrix::CreateDiagonalMatrix(diagonal.data(), column_blocks));
  const CompressedRowBlockStructure* bs = m->block_structure();
  EXPECT_EQ(bs->cols.size(), column_blocks.size());
  for (int i = 0; i < column_blocks.size(); ++i) {
    EXPECT_EQ(bs->cols[i].size, column_blocks[i].size);
    EXPECT_EQ(bs->cols[i].position, column_blocks[i].position);
  }
  EXPECT_EQ(m->num_rows(), m->num_cols());
  Vector x = Vector::Ones(num_cols);
  Vector y = Vector::Zero(num_cols);
  m->RightMultiplyAndAccumulate(x.data(), y.data());
  for (int i = 0; i < num_cols; ++i) {
    EXPECT_NEAR(y[i], diagonal[i], std::numeric_limits<double>::epsilon());
  }
}

TEST(BlockSparseMatrix, ToDenseMatrix) {
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(0);
    Matrix m_dense;
    m->ToDenseMatrix(&m_dense);
    EXPECT_EQ(m_dense.rows(), 4);
    EXPECT_EQ(m_dense.cols(), 6);
    Matrix m_expected(4, 6);
    // clang-format off
    m_expected << 1, 2, 0, 0,  0, 0,
                  3, 4, 0, 0,  0, 0,
                  0, 0, 5, 6,  7, 0,
                  0, 0, 8, 9, 10, 0;
    // clang-format on
    EXPECT_EQ(m_dense, m_expected);
  }

  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(1);
    Matrix m_dense;
    m->ToDenseMatrix(&m_dense);
    EXPECT_EQ(m_dense.rows(), 3);
    EXPECT_EQ(m_dense.cols(), 6);
    Matrix m_expected(3, 6);
    // clang-format off
    m_expected << 1, 2, 0, 5, 6, 0,
                  3, 4, 0, 7, 8, 0,
                  0, 0, 9, 0, 0, 0;
    // clang-format on
    EXPECT_EQ(m_dense, m_expected);
  }

  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(2);
    Matrix m_dense;
    m->ToDenseMatrix(&m_dense);
    EXPECT_EQ(m_dense.rows(), 3);
    EXPECT_EQ(m_dense.cols(), 6);
    Matrix m_expected(3, 6);
    // clang-format off
    m_expected << 1, 2, 0, 6, 7, 0,
                  3, 4, 0, 8, 9, 0,
                  0, 0, 5, 0, 0, 10;
    // clang-format on
    EXPECT_EQ(m_dense, m_expected);
  }
}

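// The expected rows/cols/values arrays below are the CRS form of the dense
// layouts documented in CreateTestMatrixFromId.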
TEST(BlockSparseMatrix, ToCRSMatrix) {
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(0);
    auto m_crs = m->ToCompressedRowSparseMatrix();
    std::vector<int> rows_expected = {0, 2, 4, 7, 10};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 3, 4, 2, 3, 4};
    std::vector<double> values_expected = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(1);
    auto m_crs = m->ToCompressedRowSparseMatrix();
    std::vector<int> rows_expected = {0, 4, 8, 9};
    std::vector<int> cols_expected = {0, 1, 3, 4, 0, 1, 3, 4, 2};
    std::vector<double> values_expected = {1, 2, 5, 6, 3, 4, 7, 8, 9};
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(2);
    auto m_crs = m->ToCompressedRowSparseMatrix();
    std::vector<int> rows_expected = {0, 4, 8, 10};
    std::vector<int> cols_expected = {0, 1, 3, 4, 0, 1, 3, 4, 2, 5};
    std::vector<double> values_expected = {1, 2, 6, 7, 3, 4, 8, 9, 5, 10};
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs->values()[i], values_expected[i]);
    }
  }
}

TEST(BlockSparseMatrix, ToCRSMatrixTranspose) {
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(0);
    auto m_crs_transpose = m->ToCompressedRowSparseMatrixTranspose();
    std::vector<int> rows_expected = {0, 2, 4, 6, 8, 10, 10};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 3, 2, 3, 2, 3};
    std::vector<double> values_expected = {1, 3, 2, 4, 5, 8, 6, 9, 7, 10};
    EXPECT_EQ(m_crs_transpose->num_nonzeros(), cols_expected.size());
    EXPECT_EQ(m_crs_transpose->num_rows(), rows_expected.size() - 1);
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(1);
    auto m_crs_transpose = m->ToCompressedRowSparseMatrixTranspose();
    std::vector<int> rows_expected = {0, 2, 4, 5, 7, 9, 9};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 0, 1, 0, 1};
    std::vector<double> values_expected = {1, 3, 2, 4, 9, 5, 7, 6, 8};
    EXPECT_EQ(m_crs_transpose->num_nonzeros(), cols_expected.size());
    EXPECT_EQ(m_crs_transpose->num_rows(), rows_expected.size() - 1);
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->values()[i], values_expected[i]);
    }
  }
  {
    std::unique_ptr<BlockSparseMatrix> m = CreateTestMatrixFromId(2);
    auto m_crs_transpose = m->ToCompressedRowSparseMatrixTranspose();
    std::vector<int> rows_expected = {0, 2, 4, 5, 7, 9, 10};
    std::vector<int> cols_expected = {0, 1, 0, 1, 2, 0, 1, 0, 1, 2};
    std::vector<double> values_expected = {1, 3, 2, 4, 5, 6, 8, 7, 9, 10};
    EXPECT_EQ(m_crs_transpose->num_nonzeros(), cols_expected.size());
    EXPECT_EQ(m_crs_transpose->num_rows(), rows_expected.size() - 1);
    for (int i = 0; i < rows_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->rows()[i], rows_expected[i]);
    }
    for (int i = 0; i < cols_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->cols()[i], cols_expected[i]);
    }
    for (int i = 0; i < values_expected.size(); ++i) {
      EXPECT_EQ(m_crs_transpose->values()[i], values_expected[i]);
    }
  }
}

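// Constructs a copy of a random matrix from a duplicate of its block
// structure and values, then verifies that the original and the copy produce
// identical right and left products.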
TEST(BlockSparseMatrix, CreateTranspose) {
  constexpr int kNumTrials = 10;
  BlockSparseMatrix::RandomMatrixOptions options;
  options.num_col_blocks = 10;
  options.min_col_block_size = 1;
  options.max_col_block_size = 3;

  options.num_row_blocks = 20;
  options.min_row_block_size = 1;
  options.max_row_block_size = 4;
  options.block_density = 0.25;
  std::mt19937 prng;

  for (int trial = 0; trial < kNumTrials; ++trial) {
    auto a = BlockSparseMatrix::CreateRandomMatrix(options, prng);

    auto ap_bs = std::make_unique<CompressedRowBlockStructure>();
    *ap_bs = *a->block_structure();
    BlockSparseMatrix ap(ap_bs.release());
    std::copy_n(a->values(), a->num_nonzeros(), ap.mutable_values());

    Vector x = Vector::Random(a->num_cols());
    Vector y = Vector::Random(a->num_rows());
    Vector a_x = Vector::Zero(a->num_rows());
    Vector a_t_y = Vector::Zero(a->num_cols());
    Vector ap_x = Vector::Zero(a->num_rows());
    Vector ap_t_y = Vector::Zero(a->num_cols());
    a->RightMultiplyAndAccumulate(x.data(), a_x.data());
    ap.RightMultiplyAndAccumulate(x.data(), ap_x.data());
    EXPECT_NEAR((a_x - ap_x).norm() / a_x.norm(),
                0.0,
                std::numeric_limits<double>::epsilon());
    a->LeftMultiplyAndAccumulate(y.data(), a_t_y.data());
    ap.LeftMultiplyAndAccumulate(y.data(), ap_t_y.data());
    EXPECT_NEAR((a_t_y - ap_t_y).norm() / a_t_y.norm(),
                0.0,
                std::numeric_limits<double>::epsilon());
  }
}

}  // namespace internal
}  // namespace ceres