// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/compressed_row_sparse_matrix.h"

#include <algorithm>
#include <numeric>
#include <vector>

#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

using std::vector;

namespace {

// Helper functor used by the constructor for reordering the contents
// of a TripletSparseMatrix. This comparator assumes that there are no
// duplicates in the pair of arrays rows and cols, i.e., there are no
// two indices i and j (i != j) such that
//
//   rows[i] == rows[j] && cols[i] == cols[j]
//
// If such duplicates are present, this functor will not be a
// StrictWeakOrdering.
struct RowColLessThan {
  RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}

  bool operator()(const int x, const int y) const {
    if (rows[x] == rows[y]) {
      return (cols[x] < cols[y]);
    }
    return (rows[x] < rows[y]);
  }

  const int* rows;
  const int* cols;
};

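// For example (illustrative values), the 2x3 matrix
//
//   [1 0 2]
//   [0 3 0]
//
// has the compressed row sparse (CRS) representation
//
//   rows   = [0, 2, 3]
//   cols   = [0, 2, 1]
//   values = [1, 2, 3]
//
// and the function below produces the CRS representation of its 3x2
// transpose:
//
//   transpose_rows   = [0, 1, 2, 3]
//   transpose_cols   = [0, 1, 0]
//   transpose_values = [1, 3, 2]
//
// It does so with a counting sort: count the entries in each column,
// turn the counts into row offsets for the transpose via a cumulative
// sum, and then scatter the entries into place.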
void TransposeForCompressedRowSparseStructure(const int num_rows,
                                              const int num_cols,
                                              const int num_nonzeros,
                                              const int* rows,
                                              const int* cols,
                                              const double* values,
                                              int* transpose_rows,
                                              int* transpose_cols,
                                              double* transpose_values) {
  // Explicitly zero out transpose_rows.
  std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);

  // Count the number of entries in each column of the original matrix
  // and assign to transpose_rows[col + 1].
  for (int idx = 0; idx < num_nonzeros; ++idx) {
    ++transpose_rows[cols[idx] + 1];
  }

  // Compute the starting position for each row in the transpose by
  // computing the cumulative sum of the entries of transpose_rows.
  for (int i = 1; i < num_cols + 1; ++i) {
    transpose_rows[i] += transpose_rows[i - 1];
  }

  // Populate transpose_cols and (optionally) transpose_values by
  // walking the entries of the source matrix. For each entry that is
  // added, the value of transpose_rows is incremented, allowing us to
  // keep track of where the next entry for that row should go.
  //
  // As a result transpose_rows is shifted to the left by one entry.
  for (int r = 0; r < num_rows; ++r) {
    for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
      const int c = cols[idx];
      const int transpose_idx = transpose_rows[c]++;
      transpose_cols[transpose_idx] = r;
      if (values != nullptr && transpose_values != nullptr) {
        transpose_values[transpose_idx] = values[idx];
      }
    }
  }

  // This loop undoes the left shift to transpose_rows introduced by
  // the previous loop.
  for (int i = num_cols - 1; i > 0; --i) {
    transpose_rows[i] = transpose_rows[i - 1];
  }
  transpose_rows[0] = 0;
}

void AddRandomBlock(const int num_rows,
                    const int num_cols,
                    const int row_block_begin,
                    const int col_block_begin,
                    std::vector<int>* rows,
                    std::vector<int>* cols,
                    std::vector<double>* values) {
  for (int r = 0; r < num_rows; ++r) {
    for (int c = 0; c < num_cols; ++c) {
      rows->push_back(row_block_begin + r);
      cols->push_back(col_block_begin + c);
      values->push_back(RandNormal());
    }
  }
}

void AddSymmetricRandomBlock(const int num_rows,
                             const int row_block_begin,
                             std::vector<int>* rows,
                             std::vector<int>* cols,
                             std::vector<double>* values) {
  for (int r = 0; r < num_rows; ++r) {
    for (int c = r; c < num_rows; ++c) {
      const double v = RandNormal();
      rows->push_back(row_block_begin + r);
      cols->push_back(row_block_begin + c);
      values->push_back(v);
      if (r != c) {
        rows->push_back(row_block_begin + c);
        cols->push_back(row_block_begin + r);
        values->push_back(v);
      }
    }
  }
}

}  // namespace

// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
                                                     int num_cols,
                                                     int max_num_nonzeros) {
  num_rows_ = num_rows;
  num_cols_ = num_cols;
  storage_type_ = UNSYMMETRIC;
  rows_.resize(num_rows + 1, 0);
  cols_.resize(max_num_nonzeros, 0);
  values_.resize(max_num_nonzeros, 0.0);

  VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
          << " max_num_nonzeros: " << cols_.size() << ". Allocating "
          << (num_rows_ + 1) * sizeof(int) +     // NOLINT
                 cols_.size() * sizeof(int) +    // NOLINT
                 cols_.size() * sizeof(double);  // NOLINT
}

CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
    const TripletSparseMatrix& input) {
  return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
}

CompressedRowSparseMatrix*
CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
    const TripletSparseMatrix& input) {
  return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
}

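// For example (illustrative values), the triplet matrix with
//
//   rows   = [1, 0, 0]
//   cols   = [1, 2, 0]
//   values = [4, 5, 6]
//
// i.e., the 2x3 matrix
//
//   [6 0 5]
//   [0 4 0]
//
// is converted by sorting the entries by (row, col) and yields the
// compressed row sparse representation
//
//   rows   = [0, 2, 3]
//   cols   = [0, 2, 1]
//   values = [6, 5, 4]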
CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
    const TripletSparseMatrix& input, bool transpose) {
  int num_rows = input.num_rows();
  int num_cols = input.num_cols();
  const int* rows = input.rows();
  const int* cols = input.cols();
  const double* values = input.values();

  if (transpose) {
    std::swap(num_rows, num_cols);
    std::swap(rows, cols);
  }

  // index is the list of indices into the TripletSparseMatrix input.
  vector<int> index(input.num_nonzeros(), 0);
  for (int i = 0; i < input.num_nonzeros(); ++i) {
    index[i] = i;
  }

  // Sort index such that the entries of input are ordered by row and
  // ties are broken by column.
  std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));

  VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
          << " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
          << ((num_rows + 1) * sizeof(int) +           // NOLINT
              input.num_nonzeros() * sizeof(int) +     // NOLINT
              input.num_nonzeros() * sizeof(double));  // NOLINT

  CompressedRowSparseMatrix* output =
      new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());

  if (num_rows == 0) {
    // No data to copy.
    return output;
  }

  // Copy the contents of the cols and values array in the order given
  // by index and count the number of entries in each row.
  int* output_rows = output->mutable_rows();
  int* output_cols = output->mutable_cols();
  double* output_values = output->mutable_values();

  output_rows[0] = 0;
  for (int i = 0; i < index.size(); ++i) {
    const int idx = index[i];
    ++output_rows[rows[idx] + 1];
    output_cols[i] = cols[idx];
    output_values[i] = values[idx];
  }

  // Find the cumulative sum of the row counts.
  for (int i = 1; i < num_rows + 1; ++i) {
    output_rows[i] += output_rows[i - 1];
  }

  CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
  return output;
}

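// For example (illustrative values), constructing from the diagonal
// [d0, d1, d2] with num_rows = 3 produces the 3x3 diagonal matrix
//
//   rows   = [0, 1, 2, 3]
//   cols   = [0, 1, 2]
//   values = [d0, d1, d2]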
CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
                                                     int num_rows) {
  CHECK(diagonal != nullptr);

  num_rows_ = num_rows;
  num_cols_ = num_rows;
  storage_type_ = UNSYMMETRIC;
  rows_.resize(num_rows + 1);
  cols_.resize(num_rows);
  values_.resize(num_rows);

  rows_[0] = 0;
  for (int i = 0; i < num_rows_; ++i) {
    cols_[i] = i;
    values_[i] = diagonal[i];
    rows_[i + 1] = i + 1;
  }

  CHECK_EQ(num_nonzeros(), num_rows);
}

CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}

void CompressedRowSparseMatrix::SetZero() {
  std::fill(values_.begin(), values_.end(), 0);
}

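// RightMultiply computes y += A * x and LeftMultiply computes
// y += A' * x. For the triangular (symmetric) storage types only one
// half of the matrix is stored, so each off-diagonal entry below
// contributes to both the row and the column of the product.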
// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
// block-aware for higher performance.
void CompressedRowSparseMatrix::RightMultiply(const double* x,
                                              double* y) const {
  CHECK(x != nullptr);
  CHECK(y != nullptr);

  if (storage_type_ == UNSYMMETRIC) {
    for (int r = 0; r < num_rows_; ++r) {
      for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
      }
    }
  } else if (storage_type_ == UPPER_TRIANGULAR) {
    // Because of their block structure, we will have entries that lie
    // above (below) the diagonal for lower (upper) triangular matrices,
    // so the loops below need to account for this.
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];

      // For upper triangular matrices r <= c, so skip entries with
      // r > c.
      while (idx < idx_end && r > cols_[idx]) {
        ++idx;
      }

      for (; idx < idx_end; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
        // Since we are only iterating over the upper triangular part
        // of the matrix, add contributions for the strictly lower
        // triangular part.
        if (r != c) {
          y[c] += v * x[r];
        }
      }
    }
  } else if (storage_type_ == LOWER_TRIANGULAR) {
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];
      // For lower triangular matrices, we only iterate while r >= c.
      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
        // Since we are only iterating over the lower triangular part
        // of the matrix, add contributions for the strictly upper
        // triangular part.
        if (r != c) {
          y[c] += v * x[r];
        }
      }
    }
  } else {
    LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }
}

void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
  CHECK(x != nullptr);
  CHECK(y != nullptr);

  if (storage_type_ == UNSYMMETRIC) {
    for (int r = 0; r < num_rows_; ++r) {
      for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
        y[cols_[idx]] += values_[idx] * x[r];
      }
    }
  } else {
    // Since the matrix is symmetric, LeftMultiply = RightMultiply.
    RightMultiply(x, y);
  }
}

void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
  CHECK(x != nullptr);

  std::fill(x, x + num_cols_, 0.0);
  if (storage_type_ == UNSYMMETRIC) {
    for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
      x[cols_[idx]] += values_[idx] * values_[idx];
    }
  } else if (storage_type_ == UPPER_TRIANGULAR) {
    // Because of their block structure, we will have entries that lie
    // above (below) the diagonal for lower (upper) triangular
    // matrices, so the loops below need to account for this.
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];

      // For upper triangular matrices r <= c, so skip entries with
      // r > c.
      while (idx < idx_end && r > cols_[idx]) {
        ++idx;
      }

      for (; idx < idx_end; ++idx) {
        const int c = cols_[idx];
        const double v2 = values_[idx] * values_[idx];
        x[c] += v2;
        // Since we are only iterating over the upper triangular part
        // of the matrix, add contributions for the strictly lower
        // triangular part.
        if (r != c) {
          x[r] += v2;
        }
      }
    }
  } else if (storage_type_ == LOWER_TRIANGULAR) {
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];
      // For lower triangular matrices, we only iterate while r >= c.
      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
        const int c = cols_[idx];
        const double v2 = values_[idx] * values_[idx];
        x[c] += v2;
        // Since we are only iterating over the lower triangular part
        // of the matrix, add contributions for the strictly upper
        // triangular part.
        if (r != c) {
          x[r] += v2;
        }
      }
    }
  } else {
    LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }
}

void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
  CHECK(scale != nullptr);

  for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
    values_[idx] *= scale[cols_[idx]];
  }
}

void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
  CHECK(dense_matrix != nullptr);
  dense_matrix->resize(num_rows_, num_cols_);
  dense_matrix->setZero();

  for (int r = 0; r < num_rows_; ++r) {
    for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
      (*dense_matrix)(r, cols_[idx]) = values_[idx];
    }
  }
}

void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
  CHECK_GE(delta_rows, 0);
  CHECK_LE(delta_rows, num_rows_);
  CHECK_EQ(storage_type_, UNSYMMETRIC);

  num_rows_ -= delta_rows;
  rows_.resize(num_rows_ + 1);

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return;
  }

  // Walk the list of row blocks until we reach the new number of rows
  // and then drop the rest of the row blocks.
  int num_row_blocks = 0;
  int num_rows = 0;
  while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {
    num_rows += row_blocks_[num_row_blocks];
    ++num_row_blocks;
  }

  row_blocks_.resize(num_row_blocks);
}

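// For example (illustrative values), appending a matrix m with
// m.rows() = [0, 1, 3] (2 rows, 3 non-zeros) to a matrix with
// rows_ = [0, 2, 3] (2 rows, 3 non-zeros) yields
// rows_ = [0, 2, 3, 4, 6]: the row offsets of m are shifted by the
// number of non-zeros already present, i.e., rows_[num_rows_].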
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
  CHECK_EQ(storage_type_, UNSYMMETRIC);
  CHECK_EQ(m.num_cols(), num_cols_);

  CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
        (!row_blocks_.empty() && !m.row_blocks().empty()))
      << "Cannot append a matrix with row blocks to one without and vice "
      << "versa. This matrix has: " << row_blocks_.size() << " row blocks. "
      << "The matrix being appended has: " << m.row_blocks().size()
      << " row blocks.";

  if (m.num_rows() == 0) {
    return;
  }

  if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
    cols_.resize(num_nonzeros() + m.num_nonzeros());
    values_.resize(num_nonzeros() + m.num_nonzeros());
  }

  // Copy the contents of m into this matrix.
  DCHECK_LT(num_nonzeros(), cols_.size());
  if (m.num_nonzeros() > 0) {
    std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
    std::copy(
        m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
  }

  rows_.resize(num_rows_ + m.num_rows() + 1);
  // new_rows = [rows_, m.rows() + rows_[num_rows_]]
  std::fill(rows_.begin() + num_rows_,
            rows_.begin() + num_rows_ + m.num_rows() + 1,
            rows_[num_rows_]);

  for (int r = 0; r < m.num_rows() + 1; ++r) {
    rows_[num_rows_ + r] += m.rows()[r];
  }

  num_rows_ += m.num_rows();

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return;
  }

  row_blocks_.insert(
      row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}

void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
  CHECK(file != nullptr);
  for (int r = 0; r < num_rows_; ++r) {
    for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
      fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
    }
  }
}

void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
  matrix->num_rows = num_rows_;
  matrix->num_cols = num_cols_;
  matrix->rows = rows_;
  matrix->cols = cols_;
  matrix->values = values_;

  // Trim.
  matrix->rows.resize(matrix->num_rows + 1);
  matrix->cols.resize(matrix->rows[matrix->num_rows]);
  matrix->values.resize(matrix->rows[matrix->num_rows]);
}

void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
  CHECK_GE(num_nonzeros, 0);

  cols_.resize(num_nonzeros);
  values_.resize(num_nonzeros);
}

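// For example (illustrative values), blocks = [2, 1] and
// diagonal = [d0, d1, d2] produce a 3x3 matrix with a 2x2 block
// followed by a 1x1 block on the diagonal, stored as
//
//   rows   = [0, 2, 4, 5]
//   cols   = [0, 1, 0, 1, 2]
//   values = [d0, 0, 0, d1, d2]
//
// Every entry of each diagonal block is stored explicitly; only the
// diagonal of each block is non-zero.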
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
    const double* diagonal, const vector<int>& blocks) {
  int num_rows = 0;
  int num_nonzeros = 0;
  for (int i = 0; i < blocks.size(); ++i) {
    num_rows += blocks[i];
    num_nonzeros += blocks[i] * blocks[i];
  }

  CompressedRowSparseMatrix* matrix =
      new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros);

  int* rows = matrix->mutable_rows();
  int* cols = matrix->mutable_cols();
  double* values = matrix->mutable_values();
  std::fill(values, values + num_nonzeros, 0.0);

  int idx_cursor = 0;
  int col_cursor = 0;
  for (int i = 0; i < blocks.size(); ++i) {
    const int block_size = blocks[i];
    for (int r = 0; r < block_size; ++r) {
      *(rows++) = idx_cursor;
      values[idx_cursor + r] = diagonal[col_cursor + r];
      for (int c = 0; c < block_size; ++c, ++idx_cursor) {
        *(cols++) = col_cursor + c;
      }
    }
    col_cursor += block_size;
  }
  *rows = idx_cursor;

  *matrix->mutable_row_blocks() = blocks;
  *matrix->mutable_col_blocks() = blocks;

  CHECK_EQ(idx_cursor, num_nonzeros);
  CHECK_EQ(col_cursor, num_rows);
  return matrix;
}

CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
  CompressedRowSparseMatrix* transpose =
      new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());

  switch (storage_type_) {
    case UNSYMMETRIC:
      transpose->set_storage_type(UNSYMMETRIC);
      break;
    case LOWER_TRIANGULAR:
      transpose->set_storage_type(UPPER_TRIANGULAR);
      break;
    case UPPER_TRIANGULAR:
      transpose->set_storage_type(LOWER_TRIANGULAR);
      break;
    default:
      LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }

  TransposeForCompressedRowSparseStructure(num_rows(),
                                           num_cols(),
                                           num_nonzeros(),
                                           rows(),
                                           cols(),
                                           values(),
                                           transpose->mutable_rows(),
                                           transpose->mutable_cols(),
                                           transpose->mutable_values());

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return transpose;
  }

  *(transpose->mutable_row_blocks()) = col_blocks_;
  *(transpose->mutable_col_blocks()) = row_blocks_;
  return transpose;
}

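// Usage sketch (illustrative values): generate a random unsymmetric
// matrix with a 3x3 grid of blocks, each block 1-2 rows/columns, where
// roughly half of the blocks are non-zero. The caller owns the
// returned matrix.
//
//   CompressedRowSparseMatrix::RandomMatrixOptions options;
//   options.storage_type = CompressedRowSparseMatrix::UNSYMMETRIC;
//   options.num_row_blocks = 3;
//   options.min_row_block_size = 1;
//   options.max_row_block_size = 2;
//   options.num_col_blocks = 3;
//   options.min_col_block_size = 1;
//   options.max_col_block_size = 2;
//   options.block_density = 0.5;
//   std::unique_ptr<CompressedRowSparseMatrix> matrix(
//       CompressedRowSparseMatrix::CreateRandomMatrix(options));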
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
    CompressedRowSparseMatrix::RandomMatrixOptions options) {
  CHECK_GT(options.num_row_blocks, 0);
  CHECK_GT(options.min_row_block_size, 0);
  CHECK_GT(options.max_row_block_size, 0);
  CHECK_LE(options.min_row_block_size, options.max_row_block_size);

  if (options.storage_type == UNSYMMETRIC) {
    CHECK_GT(options.num_col_blocks, 0);
    CHECK_GT(options.min_col_block_size, 0);
    CHECK_GT(options.max_col_block_size, 0);
    CHECK_LE(options.min_col_block_size, options.max_col_block_size);
  } else {
    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
    options.num_col_blocks = options.num_row_blocks;
    options.min_col_block_size = options.min_row_block_size;
    options.max_col_block_size = options.max_row_block_size;
  }

  CHECK_GT(options.block_density, 0.0);
  CHECK_LE(options.block_density, 1.0);

  vector<int> row_blocks;
  vector<int> col_blocks;

  // Generate the row block structure.
  for (int i = 0; i < options.num_row_blocks; ++i) {
    // Generate a random integer in [min_row_block_size, max_row_block_size].
    const int delta_block_size =
        Uniform(options.max_row_block_size - options.min_row_block_size);
    row_blocks.push_back(options.min_row_block_size + delta_block_size);
  }

  if (options.storage_type == UNSYMMETRIC) {
    // Generate the col block structure.
    for (int i = 0; i < options.num_col_blocks; ++i) {
      // Generate a random integer in [min_col_block_size, max_col_block_size].
      const int delta_block_size =
          Uniform(options.max_col_block_size - options.min_col_block_size);
      col_blocks.push_back(options.min_col_block_size + delta_block_size);
    }
  } else {
    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
    col_blocks = row_blocks;
  }

  vector<int> tsm_rows;
  vector<int> tsm_cols;
  vector<double> tsm_values;

  // For ease of construction, generate the matrix as a
  // TripletSparseMatrix and then convert it to a
  // CompressedRowSparseMatrix.

  // It is possible that the random matrix is empty, which is likely
  // not what the user wants, so repeat the matrix generation until we
  // have at least one non-zero entry.
  while (tsm_values.empty()) {
    tsm_rows.clear();
    tsm_cols.clear();
    tsm_values.clear();

    int row_block_begin = 0;
    for (int r = 0; r < options.num_row_blocks; ++r) {
      int col_block_begin = 0;
      for (int c = 0; c < options.num_col_blocks; ++c) {
        if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
            ((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
          col_block_begin += col_blocks[c];
          continue;
        }

        // Randomly determine if this block is present or not.
        if (RandDouble() <= options.block_density) {
          // If the matrix is symmetric, then we take care to generate
          // symmetric diagonal blocks.
          if (options.storage_type == UNSYMMETRIC || r != c) {
            AddRandomBlock(row_blocks[r],
                           col_blocks[c],
                           row_block_begin,
                           col_block_begin,
                           &tsm_rows,
                           &tsm_cols,
                           &tsm_values);
          } else {
            AddSymmetricRandomBlock(row_blocks[r],
                                    row_block_begin,
                                    &tsm_rows,
                                    &tsm_cols,
                                    &tsm_values);
          }
        }
        col_block_begin += col_blocks[c];
      }
      row_block_begin += row_blocks[r];
    }
  }

  const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
  const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
  const bool kDoNotTranspose = false;
  CompressedRowSparseMatrix* matrix =
      CompressedRowSparseMatrix::FromTripletSparseMatrix(
          TripletSparseMatrix(
              num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
          kDoNotTranspose);
  (*matrix->mutable_row_blocks()) = row_blocks;
  (*matrix->mutable_col_blocks()) = col_blocks;
  matrix->set_storage_type(options.storage_type);
  return matrix;
}

}  // namespace internal
}  // namespace ceres