blob: 21697f828a8b67d9b93231f6e440f2fa27a29fda [file] [log] [blame]
Austin Schuh70cc9552019-01-21 19:46:48 -08001// Ceres Solver - A fast non-linear least squares minimizer
Austin Schuh3de38b02024-06-25 18:25:10 -07002// Copyright 2023 Google Inc. All rights reserved.
Austin Schuh70cc9552019-01-21 19:46:48 -08003// http://ceres-solver.org/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11// this list of conditions and the following disclaimer in the documentation
12// and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14// used to endorse or promote products derived from this software without
15// specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: sameeragarwal@google.com (Sameer Agarwal)
30
31#include "ceres/compressed_row_sparse_matrix.h"
32
33#include <algorithm>
Austin Schuh3de38b02024-06-25 18:25:10 -070034#include <functional>
35#include <memory>
Austin Schuh70cc9552019-01-21 19:46:48 -080036#include <numeric>
Austin Schuh3de38b02024-06-25 18:25:10 -070037#include <random>
Austin Schuh70cc9552019-01-21 19:46:48 -080038#include <vector>
Austin Schuh1d1e6ea2020-12-23 21:56:30 -080039
Austin Schuh3de38b02024-06-25 18:25:10 -070040#include "ceres/context_impl.h"
Austin Schuh70cc9552019-01-21 19:46:48 -080041#include "ceres/crs_matrix.h"
Austin Schuh3de38b02024-06-25 18:25:10 -070042#include "ceres/internal/export.h"
43#include "ceres/parallel_for.h"
Austin Schuh70cc9552019-01-21 19:46:48 -080044#include "ceres/triplet_sparse_matrix.h"
45#include "glog/logging.h"
46
Austin Schuh3de38b02024-06-25 18:25:10 -070047namespace ceres::internal {
Austin Schuh70cc9552019-01-21 19:46:48 -080048namespace {
49
50// Helper functor used by the constructor for reordering the contents
Austin Schuh3de38b02024-06-25 18:25:10 -070051// of a TripletSparseMatrix. This comparator assumes that there are no
Austin Schuh70cc9552019-01-21 19:46:48 -080052// duplicates in the pair of arrays rows and cols, i.e., there is no
53// indices i and j (not equal to each other) s.t.
54//
55// rows[i] == rows[j] && cols[i] == cols[j]
56//
57// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
  RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}

  // Orders index x before index y when entry x precedes entry y in
  // (row, column) lexicographic order.
  bool operator()(const int x, const int y) const {
    if (rows[x] != rows[y]) {
      return rows[x] < rows[y];
    }
    return cols[x] < cols[y];
  }

  const int* rows;
  const int* cols;
};
71
// Builds the CRS structure of the transpose of a num_rows x num_cols
// CRS matrix given by (rows, cols, values). transpose_rows must have
// room for num_cols + 1 offsets, transpose_cols/transpose_values for
// num_nonzeros entries. Values are copied only when both values and
// transpose_values are non-null.
void TransposeForCompressedRowSparseStructure(const int num_rows,
                                              const int num_cols,
                                              const int num_nonzeros,
                                              const int* rows,
                                              const int* cols,
                                              const double* values,
                                              int* transpose_rows,
                                              int* transpose_cols,
                                              double* transpose_values) {
  // Start with all-zero row offsets for the transpose.
  std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);

  // Histogram: transpose_rows[c + 1] counts the entries in column c of
  // the source, i.e. the entries in row c of the transpose.
  for (int k = 0; k < num_nonzeros; ++k) {
    ++transpose_rows[cols[k] + 1];
  }

  // Running sum turns the counts into starting offsets for each row of
  // the transpose.
  std::partial_sum(
      transpose_rows, transpose_rows + num_cols + 1, transpose_rows);

  // Scatter each source entry (r, c) into row c of the transpose.
  // transpose_rows[c] is bumped as entries are placed, so after this
  // pass it holds end offsets (the array is shifted left by one slot).
  for (int r = 0; r < num_rows; ++r) {
    for (int k = rows[r]; k < rows[r + 1]; ++k) {
      const int c = cols[k];
      const int dst = transpose_rows[c]++;
      transpose_cols[dst] = r;
      if (values != nullptr && transpose_values != nullptr) {
        transpose_values[dst] = values[k];
      }
    }
  }

  // Undo the left shift: row c of the transpose begins where row c - 1
  // ended.
  for (int c = num_cols - 1; c > 0; --c) {
    transpose_rows[c] = transpose_rows[c - 1];
  }
  transpose_rows[0] = 0;
}
120
// Appends a dense num_rows x num_cols block whose top-left corner is
// at (row_block_begin, col_block_begin) to the triplet arrays, with
// entries drawn from randn(). Entries are emitted in row-major order.
template <class RandomNormalFunctor>
void AddRandomBlock(const int num_rows,
                    const int num_cols,
                    const int row_block_begin,
                    const int col_block_begin,
                    RandomNormalFunctor&& randn,
                    std::vector<int>* rows,
                    std::vector<int>* cols,
                    std::vector<double>* values) {
  const int num_entries = num_rows * num_cols;
  for (int k = 0; k < num_entries; ++k) {
    rows->push_back(row_block_begin + k / num_cols);
    cols->push_back(col_block_begin + k % num_cols);
    values->push_back(randn());
  }
}
138
// Appends a dense symmetric num_rows x num_rows block whose top-left
// corner is at (row_block_begin, row_block_begin). One random value is
// drawn per upper-triangular entry; off-diagonal entries are mirrored
// immediately below the diagonal so the block is exactly symmetric.
template <class RandomNormalFunctor>
void AddSymmetricRandomBlock(const int num_rows,
                             const int row_block_begin,
                             RandomNormalFunctor&& randn,
                             std::vector<int>* rows,
                             std::vector<int>* cols,
                             std::vector<double>* values) {
  for (int i = 0; i < num_rows; ++i) {
    for (int j = i; j < num_rows; ++j) {
      const double entry = randn();
      rows->push_back(row_block_begin + i);
      cols->push_back(row_block_begin + j);
      values->push_back(entry);
      if (i == j) {
        continue;  // Diagonal entries have no mirror image.
      }
      rows->push_back(row_block_begin + j);
      cols->push_back(row_block_begin + i);
      values->push_back(entry);
    }
  }
}
160
161} // namespace
162
163// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
164CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
165 int num_cols,
166 int max_num_nonzeros) {
167 num_rows_ = num_rows;
168 num_cols_ = num_cols;
Austin Schuh3de38b02024-06-25 18:25:10 -0700169 storage_type_ = StorageType::UNSYMMETRIC;
Austin Schuh70cc9552019-01-21 19:46:48 -0800170 rows_.resize(num_rows + 1, 0);
171 cols_.resize(max_num_nonzeros, 0);
172 values_.resize(max_num_nonzeros, 0.0);
173
174 VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
175 << " max_num_nonzeros: " << cols_.size() << ". Allocating "
176 << (num_rows_ + 1) * sizeof(int) + // NOLINT
177 cols_.size() * sizeof(int) + // NOLINT
178 cols_.size() * sizeof(double); // NOLINT
179}
180
Austin Schuh3de38b02024-06-25 18:25:10 -0700181std::unique_ptr<CompressedRowSparseMatrix>
182CompressedRowSparseMatrix::FromTripletSparseMatrix(
Austin Schuh70cc9552019-01-21 19:46:48 -0800183 const TripletSparseMatrix& input) {
184 return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
185}
186
Austin Schuh3de38b02024-06-25 18:25:10 -0700187std::unique_ptr<CompressedRowSparseMatrix>
Austin Schuh70cc9552019-01-21 19:46:48 -0800188CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
189 const TripletSparseMatrix& input) {
190 return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
191}
192
Austin Schuh3de38b02024-06-25 18:25:10 -0700193std::unique_ptr<CompressedRowSparseMatrix>
194CompressedRowSparseMatrix::FromTripletSparseMatrix(
Austin Schuh70cc9552019-01-21 19:46:48 -0800195 const TripletSparseMatrix& input, bool transpose) {
196 int num_rows = input.num_rows();
197 int num_cols = input.num_cols();
198 const int* rows = input.rows();
199 const int* cols = input.cols();
200 const double* values = input.values();
201
202 if (transpose) {
203 std::swap(num_rows, num_cols);
204 std::swap(rows, cols);
205 }
206
207 // index is the list of indices into the TripletSparseMatrix input.
Austin Schuh3de38b02024-06-25 18:25:10 -0700208 std::vector<int> index(input.num_nonzeros(), 0);
Austin Schuh70cc9552019-01-21 19:46:48 -0800209 for (int i = 0; i < input.num_nonzeros(); ++i) {
210 index[i] = i;
211 }
212
213 // Sort index such that the entries of m are ordered by row and ties
214 // are broken by column.
215 std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));
216
217 VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
218 << " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
219 << ((num_rows + 1) * sizeof(int) + // NOLINT
220 input.num_nonzeros() * sizeof(int) + // NOLINT
221 input.num_nonzeros() * sizeof(double)); // NOLINT
222
Austin Schuh3de38b02024-06-25 18:25:10 -0700223 auto output = std::make_unique<CompressedRowSparseMatrix>(
224 num_rows, num_cols, input.num_nonzeros());
Austin Schuh70cc9552019-01-21 19:46:48 -0800225
226 if (num_rows == 0) {
227 // No data to copy.
228 return output;
229 }
230
231 // Copy the contents of the cols and values array in the order given
232 // by index and count the number of entries in each row.
233 int* output_rows = output->mutable_rows();
234 int* output_cols = output->mutable_cols();
235 double* output_values = output->mutable_values();
236
237 output_rows[0] = 0;
238 for (int i = 0; i < index.size(); ++i) {
239 const int idx = index[i];
240 ++output_rows[rows[idx] + 1];
241 output_cols[i] = cols[idx];
242 output_values[i] = values[idx];
243 }
244
245 // Find the cumulative sum of the row counts.
246 for (int i = 1; i < num_rows + 1; ++i) {
247 output_rows[i] += output_rows[i - 1];
248 }
249
250 CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
251 return output;
252}
253
254CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
255 int num_rows) {
256 CHECK(diagonal != nullptr);
257
258 num_rows_ = num_rows;
259 num_cols_ = num_rows;
Austin Schuh3de38b02024-06-25 18:25:10 -0700260 storage_type_ = StorageType::UNSYMMETRIC;
Austin Schuh70cc9552019-01-21 19:46:48 -0800261 rows_.resize(num_rows + 1);
262 cols_.resize(num_rows);
263 values_.resize(num_rows);
264
265 rows_[0] = 0;
266 for (int i = 0; i < num_rows_; ++i) {
267 cols_[i] = i;
268 values_[i] = diagonal[i];
269 rows_[i + 1] = i + 1;
270 }
271
272 CHECK_EQ(num_nonzeros(), num_rows);
273}
274
// Defaulted: all storage is owned by std::vector members.
CompressedRowSparseMatrix::~CompressedRowSparseMatrix() = default;
Austin Schuh70cc9552019-01-21 19:46:48 -0800276
277void CompressedRowSparseMatrix::SetZero() {
278 std::fill(values_.begin(), values_.end(), 0);
279}
280
// TODO(sameeragarwal): Make RightMultiplyAndAccumulate and
// LeftMultiplyAndAccumulate block-aware for higher performance.
//
// Threaded y += A x. Only implemented for the unsymmetric storage
// layout; symmetric layouts fall back to the serial overload.
void CompressedRowSparseMatrix::RightMultiplyAndAccumulate(
    const double* x, double* y, ContextImpl* context, int num_threads) const {
  if (storage_type_ != StorageType::UNSYMMETRIC) {
    RightMultiplyAndAccumulate(x, y);
    return;
  }

  auto values = values_.data();
  auto rows = rows_.data();
  auto cols = cols_.data();

  // Rows are independent, so they can be processed in parallel; each
  // task accumulates y[row] += sum_c A(row, c) * x[c] and writes only
  // its own y[row].
  ParallelFor(
      context, 0, num_rows_, num_threads, [values, rows, cols, x, y](int row) {
        for (int idx = rows[row]; idx < rows[row + 1]; ++idx) {
          const int c = cols[idx];
          const double v = values[idx];
          y[row] += v * x[c];
        }
      });
}
303
// Serial y += A x. For triangular (symmetric) storage only the stored
// triangle is traversed and each off-diagonal entry contributes to
// both y[r] and y[c].
void CompressedRowSparseMatrix::RightMultiplyAndAccumulate(const double* x,
                                                           double* y) const {
  CHECK(x != nullptr);
  CHECK(y != nullptr);

  if (storage_type_ == StorageType::UNSYMMETRIC) {
    // Delegate to the threaded overload with a single thread.
    RightMultiplyAndAccumulate(x, y, nullptr, 1);
  } else if (storage_type_ == StorageType::UPPER_TRIANGULAR) {
    // Because of their block structure, we will have entries that lie
    // above (below) the diagonal for lower (upper) triangular matrices,
    // so the loops below need to account for this.
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];

      // For upper triangular matrices r <= c, so skip entries with r
      // > c.
      while (idx < idx_end && r > cols_[idx]) {
        ++idx;
      }

      for (; idx < idx_end; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
        // Since we are only iterating over the upper triangular part
        // of the matrix, add contributions for the strictly lower
        // triangular part.
        if (r != c) {
          y[c] += v * x[r];
        }
      }
    }
  } else if (storage_type_ == StorageType::LOWER_TRIANGULAR) {
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];
      // For lower triangular matrices, we only iterate till we are r >=
      // c.
      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
        // Since we are only iterating over the lower triangular part
        // of the matrix, add contributions for the strictly upper
        // triangular part.
        if (r != c) {
          y[c] += v * x[r];
        }
      }
    }
  } else {
    LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }
}
359
Austin Schuh3de38b02024-06-25 18:25:10 -0700360void CompressedRowSparseMatrix::LeftMultiplyAndAccumulate(const double* x,
361 double* y) const {
Austin Schuh70cc9552019-01-21 19:46:48 -0800362 CHECK(x != nullptr);
363 CHECK(y != nullptr);
364
Austin Schuh3de38b02024-06-25 18:25:10 -0700365 if (storage_type_ == StorageType::UNSYMMETRIC) {
Austin Schuh70cc9552019-01-21 19:46:48 -0800366 for (int r = 0; r < num_rows_; ++r) {
367 for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
368 y[cols_[idx]] += values_[idx] * x[r];
369 }
370 }
371 } else {
Austin Schuh3de38b02024-06-25 18:25:10 -0700372 // Since the matrix is symmetric, LeftMultiplyAndAccumulate =
373 // RightMultiplyAndAccumulate.
374 RightMultiplyAndAccumulate(x, y);
Austin Schuh70cc9552019-01-21 19:46:48 -0800375 }
376}
377
// Computes x[c] = sum_r A(r, c)^2 for every column c. For triangular
// (symmetric) storage, contributions from the implicit unstored
// triangle are added as well.
void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
  CHECK(x != nullptr);

  std::fill(x, x + num_cols_, 0.0);
  if (storage_type_ == StorageType::UNSYMMETRIC) {
    for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
      x[cols_[idx]] += values_[idx] * values_[idx];
    }
  } else if (storage_type_ == StorageType::UPPER_TRIANGULAR) {
    // Because of their block structure, we will have entries that lie
    // above (below) the diagonal for lower (upper) triangular
    // matrices, so the loops below need to account for this.
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];

      // For upper triangular matrices r <= c, so skip entries with r
      // > c.
      while (idx < idx_end && r > cols_[idx]) {
        ++idx;
      }

      for (; idx < idx_end; ++idx) {
        const int c = cols_[idx];
        const double v2 = values_[idx] * values_[idx];
        x[c] += v2;
        // Since we are only iterating over the upper triangular part
        // of the matrix, add contributions for the strictly lower
        // triangular part.
        if (r != c) {
          x[r] += v2;
        }
      }
    }
  } else if (storage_type_ == StorageType::LOWER_TRIANGULAR) {
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];
      // For lower triangular matrices, we only iterate till we are r >=
      // c.
      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
        const int c = cols_[idx];
        const double v2 = values_[idx] * values_[idx];
        x[c] += v2;
        // Since we are only iterating over the lower triangular part
        // of the matrix, add contributions for the strictly upper
        // triangular part.
        if (r != c) {
          x[r] += v2;
        }
      }
    }
  } else {
    LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }
}
434void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
435 CHECK(scale != nullptr);
436
437 for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
438 values_[idx] *= scale[cols_[idx]];
439 }
440}
441
442void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
443 CHECK(dense_matrix != nullptr);
444 dense_matrix->resize(num_rows_, num_cols_);
445 dense_matrix->setZero();
446
447 for (int r = 0; r < num_rows_; ++r) {
448 for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
449 (*dense_matrix)(r, cols_[idx]) = values_[idx];
450 }
451 }
452}
453
// Shrinks the matrix by dropping its bottom delta_rows rows. Only the
// row offset array (and the row block bookkeeping) is trimmed; cols_
// and values_ keep their excess capacity.
void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
  CHECK_GE(delta_rows, 0);
  CHECK_LE(delta_rows, num_rows_);
  // Only supported for unsymmetric storage; deleting rows of a
  // triangular/symmetric matrix would break its invariants.
  CHECK_EQ(storage_type_, StorageType::UNSYMMETRIC);

  num_rows_ -= delta_rows;
  rows_.resize(num_rows_ + 1);

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return;
  }

  // Walk the list of row blocks until we reach the new number of rows
  // and the drop the rest of the row blocks.
  int num_row_blocks = 0;
  int num_rows = 0;
  while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {
    num_rows += row_blocks_[num_row_blocks].size;
    ++num_row_blocks;
  }

  row_blocks_.resize(num_row_blocks);
}
479
// Appends the rows of m below the existing rows of this matrix. Both
// matrices must have the same number of columns, be unsymmetric, and
// agree on whether row block structure is present.
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
  CHECK_EQ(storage_type_, StorageType::UNSYMMETRIC);
  CHECK_EQ(m.num_cols(), num_cols_);

  CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
        (!row_blocks_.empty() && !m.row_blocks().empty()))
      << "Cannot append a matrix with row blocks to one without and vice versa."
      << "This matrix has : " << row_blocks_.size() << " row blocks."
      << "The matrix being appended has: " << m.row_blocks().size()
      << " row blocks.";

  if (m.num_rows() == 0) {
    return;
  }

  // Grow cols_/values_ only if the existing capacity is insufficient.
  if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
    cols_.resize(num_nonzeros() + m.num_nonzeros());
    values_.resize(num_nonzeros() + m.num_nonzeros());
  }

  // Copy the contents of m into this matrix.
  DCHECK_LT(num_nonzeros(), cols_.size());
  if (m.num_nonzeros() > 0) {
    std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
    std::copy(
        m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
  }

  // Extend the row offsets: each appended offset is m's offset shifted
  // by the number of nonzeros already present.
  rows_.resize(num_rows_ + m.num_rows() + 1);
  // new_rows = [rows_, m.row() + rows_[num_rows_]]
  std::fill(rows_.begin() + num_rows_,
            rows_.begin() + num_rows_ + m.num_rows() + 1,
            rows_[num_rows_]);

  for (int r = 0; r < m.num_rows() + 1; ++r) {
    rows_[num_rows_ + r] += m.rows()[r];
  }

  num_rows_ += m.num_rows();

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return;
  }

  row_blocks_.insert(
      row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}
529
530void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
531 CHECK(file != nullptr);
532 for (int r = 0; r < num_rows_; ++r) {
533 for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
534 fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
535 }
536 }
537}
538
539void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
540 matrix->num_rows = num_rows_;
541 matrix->num_cols = num_cols_;
542 matrix->rows = rows_;
543 matrix->cols = cols_;
544 matrix->values = values_;
545
546 // Trim.
547 matrix->rows.resize(matrix->num_rows + 1);
548 matrix->cols.resize(matrix->rows[matrix->num_rows]);
549 matrix->values.resize(matrix->rows[matrix->num_rows]);
550}
551
// Resizes the column index and value arrays to hold num_nonzeros
// entries; the row offset array is left untouched.
void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
  CHECK_GE(num_nonzeros, 0);

  cols_.resize(num_nonzeros);
  values_.resize(num_nonzeros);
}
558
// Builds a block-diagonal CRS matrix with the given block structure.
// Each diagonal block is stored densely; off-block-diagonal values are
// zero. If diagonal is non-null, its entries are placed on the scalar
// diagonal; otherwise the matrix is all zeros (but structurally dense
// within each block).
std::unique_ptr<CompressedRowSparseMatrix>
CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
    const double* diagonal, const std::vector<Block>& blocks) {
  const int num_rows = NumScalarEntries(blocks);
  int num_nonzeros = 0;
  for (auto& block : blocks) {
    num_nonzeros += block.size * block.size;
  }

  auto matrix = std::make_unique<CompressedRowSparseMatrix>(
      num_rows, num_rows, num_nonzeros);

  int* rows = matrix->mutable_rows();
  int* cols = matrix->mutable_cols();
  double* values = matrix->mutable_values();
  std::fill(values, values + num_nonzeros, 0.0);

  // idx_cursor walks the nonzero slots; col_cursor tracks the scalar
  // column at which the current block starts.
  int idx_cursor = 0;
  int col_cursor = 0;
  for (auto& block : blocks) {
    for (int r = 0; r < block.size; ++r) {
      // Row offset for scalar row (col_cursor + r) of the matrix.
      *(rows++) = idx_cursor;
      if (diagonal != nullptr) {
        // Within this dense row, the diagonal entry sits r slots in.
        values[idx_cursor + r] = diagonal[col_cursor + r];
      }
      for (int c = 0; c < block.size; ++c, ++idx_cursor) {
        *(cols++) = col_cursor + c;
      }
    }
    col_cursor += block.size;
  }
  *rows = idx_cursor;

  *matrix->mutable_row_blocks() = blocks;
  *matrix->mutable_col_blocks() = blocks;

  CHECK_EQ(idx_cursor, num_nonzeros);
  CHECK_EQ(col_cursor, num_rows);
  return matrix;
}
599
// Returns a newly allocated transpose of this matrix. The storage type
// is mapped accordingly (upper <-> lower triangular) and the row and
// column block structures are swapped.
std::unique_ptr<CompressedRowSparseMatrix>
CompressedRowSparseMatrix::Transpose() const {
  auto transpose = std::make_unique<CompressedRowSparseMatrix>(
      num_cols_, num_rows_, num_nonzeros());

  switch (storage_type_) {
    case StorageType::UNSYMMETRIC:
      transpose->set_storage_type(StorageType::UNSYMMETRIC);
      break;
    case StorageType::LOWER_TRIANGULAR:
      // Transposing swaps which triangle is stored.
      transpose->set_storage_type(StorageType::UPPER_TRIANGULAR);
      break;
    case StorageType::UPPER_TRIANGULAR:
      transpose->set_storage_type(StorageType::LOWER_TRIANGULAR);
      break;
    default:
      LOG(FATAL) << "Unknown storage type: " << storage_type_;
  };

  TransposeForCompressedRowSparseStructure(num_rows(),
                                           num_cols(),
                                           num_nonzeros(),
                                           rows(),
                                           cols(),
                                           values(),
                                           transpose->mutable_rows(),
                                           transpose->mutable_cols(),
                                           transpose->mutable_values());

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return transpose;
  }

  *(transpose->mutable_row_blocks()) = col_blocks_;
  *(transpose->mutable_col_blocks()) = row_blocks_;
  return transpose;
}
639
// Generates a random block-sparse matrix as described by options,
// drawing block sizes, the block sparsity pattern and the entry values
// from prng. The result is guaranteed to contain at least one nonzero.
std::unique_ptr<CompressedRowSparseMatrix>
CompressedRowSparseMatrix::CreateRandomMatrix(
    CompressedRowSparseMatrix::RandomMatrixOptions options,
    std::mt19937& prng) {
  CHECK_GT(options.num_row_blocks, 0);
  CHECK_GT(options.min_row_block_size, 0);
  CHECK_GT(options.max_row_block_size, 0);
  CHECK_LE(options.min_row_block_size, options.max_row_block_size);

  if (options.storage_type == StorageType::UNSYMMETRIC) {
    CHECK_GT(options.num_col_blocks, 0);
    CHECK_GT(options.min_col_block_size, 0);
    CHECK_GT(options.max_col_block_size, 0);
    CHECK_LE(options.min_col_block_size, options.max_col_block_size);
  } else {
    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR);
    // column structure must mirror the row structure.
    options.num_col_blocks = options.num_row_blocks;
    options.min_col_block_size = options.min_row_block_size;
    options.max_col_block_size = options.max_row_block_size;
  }

  CHECK_GT(options.block_density, 0.0);
  CHECK_LE(options.block_density, 1.0);

  std::vector<Block> row_blocks;
  row_blocks.reserve(options.num_row_blocks);
  std::vector<Block> col_blocks;
  col_blocks.reserve(options.num_col_blocks);

  std::uniform_int_distribution<int> col_distribution(
      options.min_col_block_size, options.max_col_block_size);
  std::uniform_int_distribution<int> row_distribution(
      options.min_row_block_size, options.max_row_block_size);
  std::uniform_real_distribution<double> uniform01(0.0, 1.0);
  std::normal_distribution<double> standard_normal_distribution;

  // Generate the row block structure.
  int row_pos = 0;
  for (int i = 0; i < options.num_row_blocks; ++i) {
    // Generate a random integer in [min_row_block_size, max_row_block_size]
    row_blocks.emplace_back(row_distribution(prng), row_pos);
    row_pos += row_blocks.back().size;
  }

  if (options.storage_type == StorageType::UNSYMMETRIC) {
    // Generate the col block structure.
    int col_pos = 0;
    for (int i = 0; i < options.num_col_blocks; ++i) {
      // Generate a random integer in [min_col_block_size, max_col_block_size]
      col_blocks.emplace_back(col_distribution(prng), col_pos);
      col_pos += col_blocks.back().size;
    }
  } else {
    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR);
    col_blocks = row_blocks;
  }

  std::vector<int> tsm_rows;
  std::vector<int> tsm_cols;
  std::vector<double> tsm_values;

  // For ease of construction, we are going to generate the
  // CompressedRowSparseMatrix by generating it as a
  // TripletSparseMatrix and then converting it to a
  // CompressedRowSparseMatrix.

  // It is possible that the random matrix is empty which is likely
  // not what the user wants, so do the matrix generation till we have
  // at least one non-zero entry.
  while (tsm_values.empty()) {
    tsm_rows.clear();
    tsm_cols.clear();
    tsm_values.clear();

    int row_block_begin = 0;
    for (int r = 0; r < options.num_row_blocks; ++r) {
      int col_block_begin = 0;
      for (int c = 0; c < options.num_col_blocks; ++c) {
        // For triangular storage, skip blocks on the wrong side of the
        // diagonal.
        if (((options.storage_type == StorageType::UPPER_TRIANGULAR) &&
             (r > c)) ||
            ((options.storage_type == StorageType::LOWER_TRIANGULAR) &&
             (r < c))) {
          col_block_begin += col_blocks[c].size;
          continue;
        }

        // Randomly determine if this block is present or not.
        if (uniform01(prng) <= options.block_density) {
          auto randn = [&standard_normal_distribution, &prng] {
            return standard_normal_distribution(prng);
          };
          // If the matrix is symmetric, then we take care to generate
          // symmetric diagonal blocks.
          if (options.storage_type == StorageType::UNSYMMETRIC || r != c) {
            AddRandomBlock(row_blocks[r].size,
                           col_blocks[c].size,
                           row_block_begin,
                           col_block_begin,
                           randn,
                           &tsm_rows,
                           &tsm_cols,
                           &tsm_values);
          } else {
            AddSymmetricRandomBlock(row_blocks[r].size,
                                    row_block_begin,
                                    randn,
                                    &tsm_rows,
                                    &tsm_cols,
                                    &tsm_values);
          }
        }
        col_block_begin += col_blocks[c].size;
      }
      row_block_begin += row_blocks[r].size;
    }
  }

  const int num_rows = NumScalarEntries(row_blocks);
  const int num_cols = NumScalarEntries(col_blocks);
  const bool kDoNotTranspose = false;
  auto matrix = CompressedRowSparseMatrix::FromTripletSparseMatrix(
      TripletSparseMatrix(num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
      kDoNotTranspose);
  (*matrix->mutable_row_blocks()) = row_blocks;
  (*matrix->mutable_col_blocks()) = col_blocks;
  matrix->set_storage_type(options.storage_type);
  return matrix;
}
768
Austin Schuh3de38b02024-06-25 18:25:10 -0700769} // namespace ceres::internal