// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/compressed_row_sparse_matrix.h"

#include <algorithm>
#include <numeric>
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

using std::vector;

namespace {

// Helper functor used by the constructor for reordering the contents
// of a TripletSparseMatrix. This comparator assumes that there are no
// duplicates in the pair of arrays rows and cols, i.e., there are no
// indices i and j (not equal to each other) s.t.
//
//   rows[i] == rows[j] && cols[i] == cols[j]
//
// If there are duplicates, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
  RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}

  bool operator()(const int x, const int y) const {
    if (rows[x] == rows[y]) {
      return (cols[x] < cols[y]);
    }
    return (rows[x] < rows[y]);
  }

  const int* rows;
  const int* cols;
};

void TransposeForCompressedRowSparseStructure(const int num_rows,
                                              const int num_cols,
                                              const int num_nonzeros,
                                              const int* rows,
                                              const int* cols,
                                              const double* values,
                                              int* transpose_rows,
                                              int* transpose_cols,
                                              double* transpose_values) {
  // Explicitly zero out transpose_rows.
  std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);

  // Count the number of entries in each column of the original matrix
  // and assign to transpose_rows[col + 1].
  for (int idx = 0; idx < num_nonzeros; ++idx) {
    ++transpose_rows[cols[idx] + 1];
  }

  // Compute the starting position for each row in the transpose by
  // computing the cumulative sum of the entries of transpose_rows.
  for (int i = 1; i < num_cols + 1; ++i) {
    transpose_rows[i] += transpose_rows[i - 1];
  }

  // Populate transpose_cols and (optionally) transpose_values by
  // walking the entries of the source matrix. For each entry that is
  // added, the corresponding entry of transpose_rows is incremented,
  // allowing us to keep track of where the next entry for that row
  // should go.
  //
  // As a result transpose_rows is shifted to the left by one entry.
  for (int r = 0; r < num_rows; ++r) {
    for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
      const int c = cols[idx];
      const int transpose_idx = transpose_rows[c]++;
      transpose_cols[transpose_idx] = r;
      if (values != NULL && transpose_values != NULL) {
        transpose_values[transpose_idx] = values[idx];
      }
    }
  }

  // This loop undoes the left shift to transpose_rows introduced by
  // the previous loop.
  for (int i = num_cols - 1; i > 0; --i) {
    transpose_rows[i] = transpose_rows[i - 1];
  }
  transpose_rows[0] = 0;
}

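// Appends a dense num_rows x num_cols block of random values generated
// by RandNormal(), in triplet (row, col, value) form, with its top left
// corner at (row_block_begin, col_block_begin).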
void AddRandomBlock(const int num_rows,
                    const int num_cols,
                    const int row_block_begin,
                    const int col_block_begin,
                    std::vector<int>* rows,
                    std::vector<int>* cols,
                    std::vector<double>* values) {
  for (int r = 0; r < num_rows; ++r) {
    for (int c = 0; c < num_cols; ++c) {
      rows->push_back(row_block_begin + r);
      cols->push_back(col_block_begin + c);
      values->push_back(RandNormal());
    }
  }
}

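// Appends a symmetric num_rows x num_rows block of random values in
// triplet form, with its top left corner at (row_block_begin,
// row_block_begin). Each off-diagonal value is mirrored across the
// diagonal so that the block is symmetric.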
void AddSymmetricRandomBlock(const int num_rows,
                             const int row_block_begin,
                             std::vector<int>* rows,
                             std::vector<int>* cols,
                             std::vector<double>* values) {
  for (int r = 0; r < num_rows; ++r) {
    for (int c = r; c < num_rows; ++c) {
      const double v = RandNormal();
      rows->push_back(row_block_begin + r);
      cols->push_back(row_block_begin + c);
      values->push_back(v);
      if (r != c) {
        rows->push_back(row_block_begin + c);
        cols->push_back(row_block_begin + r);
        values->push_back(v);
      }
    }
  }
}

}  // namespace

// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
                                                     int num_cols,
                                                     int max_num_nonzeros) {
  num_rows_ = num_rows;
  num_cols_ = num_cols;
  storage_type_ = UNSYMMETRIC;
  rows_.resize(num_rows + 1, 0);
  cols_.resize(max_num_nonzeros, 0);
  values_.resize(max_num_nonzeros, 0.0);

  VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
          << " max_num_nonzeros: " << cols_.size() << ". Allocating "
          << (num_rows_ + 1) * sizeof(int) +      // NOLINT
                 cols_.size() * sizeof(int) +     // NOLINT
                 cols_.size() * sizeof(double);   // NOLINT
}

CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
    const TripletSparseMatrix& input) {
  return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
}

CompressedRowSparseMatrix*
CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
    const TripletSparseMatrix& input) {
  return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
}

CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
    const TripletSparseMatrix& input, bool transpose) {
  int num_rows = input.num_rows();
  int num_cols = input.num_cols();
  const int* rows = input.rows();
  const int* cols = input.cols();
  const double* values = input.values();

  if (transpose) {
    std::swap(num_rows, num_cols);
    std::swap(rows, cols);
  }

  // index is the list of indices into the TripletSparseMatrix input.
  vector<int> index(input.num_nonzeros(), 0);
  for (int i = 0; i < input.num_nonzeros(); ++i) {
    index[i] = i;
  }

  // Sort index such that the entries of input are ordered by row and
  // ties are broken by column.
  std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));

  VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
          << " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
          << ((num_rows + 1) * sizeof(int) +           // NOLINT
              input.num_nonzeros() * sizeof(int) +     // NOLINT
              input.num_nonzeros() * sizeof(double));  // NOLINT

  CompressedRowSparseMatrix* output =
      new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());

  if (num_rows == 0) {
    // No data to copy.
    return output;
  }

  // Copy the contents of the cols and values arrays in the order given
  // by index and count the number of entries in each row.
  int* output_rows = output->mutable_rows();
  int* output_cols = output->mutable_cols();
  double* output_values = output->mutable_values();

  output_rows[0] = 0;
  for (int i = 0; i < index.size(); ++i) {
    const int idx = index[i];
    ++output_rows[rows[idx] + 1];
    output_cols[i] = cols[idx];
    output_values[i] = values[idx];
  }

  // Find the cumulative sum of the row counts.
  for (int i = 1; i < num_rows + 1; ++i) {
    output_rows[i] += output_rows[i - 1];
  }

  CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
  return output;
}

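// Constructs a num_rows x num_rows diagonal matrix in compressed row
// form, with the values taken from the diagonal array.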
CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
                                                     int num_rows) {
  CHECK(diagonal != nullptr);

  num_rows_ = num_rows;
  num_cols_ = num_rows;
  storage_type_ = UNSYMMETRIC;
  rows_.resize(num_rows + 1);
  cols_.resize(num_rows);
  values_.resize(num_rows);

  rows_[0] = 0;
  for (int i = 0; i < num_rows_; ++i) {
    cols_[i] = i;
    values_[i] = diagonal[i];
    rows_[i + 1] = i + 1;
  }

  CHECK_EQ(num_nonzeros(), num_rows);
}

CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}

void CompressedRowSparseMatrix::SetZero() {
  std::fill(values_.begin(), values_.end(), 0);
}

// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
// block-aware for higher performance.
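//
// Accumulates y += A * x. For the triangular (symmetric) storage types
// only the stored triangle is traversed and each off-diagonal entry
// contributes to both y[r] and y[c].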
void CompressedRowSparseMatrix::RightMultiply(const double* x,
                                              double* y) const {
  CHECK(x != nullptr);
  CHECK(y != nullptr);

  if (storage_type_ == UNSYMMETRIC) {
    for (int r = 0; r < num_rows_; ++r) {
      for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
      }
    }
  } else if (storage_type_ == UPPER_TRIANGULAR) {
    // Because of their block structure, we will have entries that lie
    // above (below) the diagonal for lower (upper) triangular matrices,
    // so the loops below need to account for this.
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];

      // For upper triangular matrices r <= c, so skip entries with
      // r > c.
      while (idx < idx_end && r > cols_[idx]) {
        ++idx;
      }

      for (; idx < idx_end; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
        // Since we are only iterating over the upper triangular part
        // of the matrix, add contributions for the strictly lower
        // triangular part.
        if (r != c) {
          y[c] += v * x[r];
        }
      }
    }
  } else if (storage_type_ == LOWER_TRIANGULAR) {
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];
      // For lower triangular matrices, we only iterate while r >= c.
      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
        const int c = cols_[idx];
        const double v = values_[idx];
        y[r] += v * x[c];
        // Since we are only iterating over the lower triangular part
        // of the matrix, add contributions for the strictly upper
        // triangular part.
        if (r != c) {
          y[c] += v * x[r];
        }
      }
    }
  } else {
    LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }
}

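// Accumulates y += A' * x. For the triangular (symmetric) storage types
// the matrix equals its transpose, so this reduces to RightMultiply.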
void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
  CHECK(x != nullptr);
  CHECK(y != nullptr);

  if (storage_type_ == UNSYMMETRIC) {
    for (int r = 0; r < num_rows_; ++r) {
      for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
        y[cols_[idx]] += values_[idx] * x[r];
      }
    }
  } else {
    // Since the matrix is symmetric, LeftMultiply = RightMultiply.
    RightMultiply(x, y);
  }
}

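// Computes the squared L2 norm of each column of the matrix and stores
// the result in x, which must have room for num_cols entries.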
void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
  CHECK(x != nullptr);

  std::fill(x, x + num_cols_, 0.0);
  if (storage_type_ == UNSYMMETRIC) {
    for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
      x[cols_[idx]] += values_[idx] * values_[idx];
    }
  } else if (storage_type_ == UPPER_TRIANGULAR) {
    // Because of their block structure, we will have entries that lie
    // above (below) the diagonal for lower (upper) triangular
    // matrices, so the loops below need to account for this.
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];

      // For upper triangular matrices r <= c, so skip entries with
      // r > c.
      while (idx < idx_end && r > cols_[idx]) {
        ++idx;
      }

      for (; idx < idx_end; ++idx) {
        const int c = cols_[idx];
        const double v2 = values_[idx] * values_[idx];
        x[c] += v2;
        // Since we are only iterating over the upper triangular part
        // of the matrix, add contributions for the strictly lower
        // triangular part.
        if (r != c) {
          x[r] += v2;
        }
      }
    }
  } else if (storage_type_ == LOWER_TRIANGULAR) {
    for (int r = 0; r < num_rows_; ++r) {
      int idx = rows_[r];
      const int idx_end = rows_[r + 1];
      // For lower triangular matrices, we only iterate while r >= c.
      for (; idx < idx_end && r >= cols_[idx]; ++idx) {
        const int c = cols_[idx];
        const double v2 = values_[idx] * values_[idx];
        x[c] += v2;
        // Since we are only iterating over the lower triangular part
        // of the matrix, add contributions for the strictly upper
        // triangular part.
        if (r != c) {
          x[r] += v2;
        }
      }
    }
  } else {
    LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }
}

void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
  CHECK(scale != nullptr);

  for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
    values_[idx] *= scale[cols_[idx]];
  }
}

void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
  CHECK(dense_matrix != nullptr);
  dense_matrix->resize(num_rows_, num_cols_);
  dense_matrix->setZero();

  for (int r = 0; r < num_rows_; ++r) {
    for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
      (*dense_matrix)(r, cols_[idx]) = values_[idx];
    }
  }
}

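// Removes the last delta_rows rows from the matrix and trims the row
// block structure accordingly. The cols and values arrays are left
// untouched.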
void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
  CHECK_GE(delta_rows, 0);
  CHECK_LE(delta_rows, num_rows_);
  CHECK_EQ(storage_type_, UNSYMMETRIC);

  num_rows_ -= delta_rows;
  rows_.resize(num_rows_ + 1);

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return;
  }

  // Walk the list of row blocks until we reach the new number of rows
  // and then drop the rest of the row blocks.
  int num_row_blocks = 0;
  int num_rows = 0;
  while (num_row_blocks < row_blocks_.size() && num_rows < num_rows_) {
    num_rows += row_blocks_[num_row_blocks];
    ++num_row_blocks;
  }

  row_blocks_.resize(num_row_blocks);
}

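// Appends the rows of m below the existing rows of this matrix. Both
// matrices must have the same number of columns, and either both or
// neither may carry row block information.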
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
  CHECK_EQ(storage_type_, UNSYMMETRIC);
  CHECK_EQ(m.num_cols(), num_cols_);

  CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
        (!row_blocks_.empty() && !m.row_blocks().empty()))
      << "Cannot append a matrix with row blocks to one without and "
      << "vice versa. This matrix has " << row_blocks_.size()
      << " row blocks. The matrix being appended has "
      << m.row_blocks().size() << " row blocks.";

  if (m.num_rows() == 0) {
    return;
  }

  if (cols_.size() < num_nonzeros() + m.num_nonzeros()) {
    cols_.resize(num_nonzeros() + m.num_nonzeros());
    values_.resize(num_nonzeros() + m.num_nonzeros());
  }

  // Copy the contents of m into this matrix.
  DCHECK_LT(num_nonzeros(), cols_.size());
  if (m.num_nonzeros() > 0) {
    std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
    std::copy(
        m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
  }

  rows_.resize(num_rows_ + m.num_rows() + 1);
  // new_rows = [rows_, m.rows() + rows_[num_rows_]]
  std::fill(rows_.begin() + num_rows_,
            rows_.begin() + num_rows_ + m.num_rows() + 1,
            rows_[num_rows_]);

  for (int r = 0; r < m.num_rows() + 1; ++r) {
    rows_[num_rows_ + r] += m.rows()[r];
  }

  num_rows_ += m.num_rows();

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return;
  }

  row_blocks_.insert(
      row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}

void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
  CHECK(file != nullptr);
  for (int r = 0; r < num_rows_; ++r) {
    for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
      fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
    }
  }
}

void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
  matrix->num_rows = num_rows_;
  matrix->num_cols = num_cols_;
  matrix->rows = rows_;
  matrix->cols = cols_;
  matrix->values = values_;

  // Trim.
  matrix->rows.resize(matrix->num_rows + 1);
  matrix->cols.resize(matrix->rows[matrix->num_rows]);
  matrix->values.resize(matrix->rows[matrix->num_rows]);
}

void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
  CHECK_GE(num_nonzeros, 0);

  cols_.resize(num_nonzeros);
  values_.resize(num_nonzeros);
}

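// Builds a block diagonal matrix whose i-th diagonal block is a dense
// blocks[i] x blocks[i] block. The entries of diagonal are placed on
// the diagonal and the remaining entries of each block are zero.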
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
    const double* diagonal, const vector<int>& blocks) {
  int num_rows = 0;
  int num_nonzeros = 0;
  for (int i = 0; i < blocks.size(); ++i) {
    num_rows += blocks[i];
    num_nonzeros += blocks[i] * blocks[i];
  }

  CompressedRowSparseMatrix* matrix =
      new CompressedRowSparseMatrix(num_rows, num_rows, num_nonzeros);

  int* rows = matrix->mutable_rows();
  int* cols = matrix->mutable_cols();
  double* values = matrix->mutable_values();
  std::fill(values, values + num_nonzeros, 0.0);

  int idx_cursor = 0;
  int col_cursor = 0;
  for (int i = 0; i < blocks.size(); ++i) {
    const int block_size = blocks[i];
    for (int r = 0; r < block_size; ++r) {
      *(rows++) = idx_cursor;
      values[idx_cursor + r] = diagonal[col_cursor + r];
      for (int c = 0; c < block_size; ++c, ++idx_cursor) {
        *(cols++) = col_cursor + c;
      }
    }
    col_cursor += block_size;
  }
  *rows = idx_cursor;

  *matrix->mutable_row_blocks() = blocks;
  *matrix->mutable_col_blocks() = blocks;

  CHECK_EQ(idx_cursor, num_nonzeros);
  CHECK_EQ(col_cursor, num_rows);
  return matrix;
}

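// Returns a newly allocated transpose of this matrix. LOWER_TRIANGULAR
// and UPPER_TRIANGULAR storage types are swapped, as are the row and
// column block structures.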
CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
  CompressedRowSparseMatrix* transpose =
      new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());

  switch (storage_type_) {
    case UNSYMMETRIC:
      transpose->set_storage_type(UNSYMMETRIC);
      break;
    case LOWER_TRIANGULAR:
      transpose->set_storage_type(UPPER_TRIANGULAR);
      break;
    case UPPER_TRIANGULAR:
      transpose->set_storage_type(LOWER_TRIANGULAR);
      break;
    default:
      LOG(FATAL) << "Unknown storage type: " << storage_type_;
  }

  TransposeForCompressedRowSparseStructure(num_rows(),
                                           num_cols(),
                                           num_nonzeros(),
                                           rows(),
                                           cols(),
                                           values(),
                                           transpose->mutable_rows(),
                                           transpose->mutable_cols(),
                                           transpose->mutable_values());

  // The rest of the code updates the block information. Immediately
  // return in case of no block information.
  if (row_blocks_.empty()) {
    return transpose;
  }

  *(transpose->mutable_row_blocks()) = col_blocks_;
  *(transpose->mutable_col_blocks()) = row_blocks_;
  return transpose;
}

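// Generates a random block sparse matrix in compressed row form. Block
// sizes are drawn uniformly from the ranges given in options, each
// block is present with probability options.block_density, and for
// symmetric storage types the diagonal blocks are generated symmetric.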
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
    CompressedRowSparseMatrix::RandomMatrixOptions options) {
  CHECK_GT(options.num_row_blocks, 0);
  CHECK_GT(options.min_row_block_size, 0);
  CHECK_GT(options.max_row_block_size, 0);
  CHECK_LE(options.min_row_block_size, options.max_row_block_size);

  if (options.storage_type == UNSYMMETRIC) {
    CHECK_GT(options.num_col_blocks, 0);
    CHECK_GT(options.min_col_block_size, 0);
    CHECK_GT(options.max_col_block_size, 0);
    CHECK_LE(options.min_col_block_size, options.max_col_block_size);
  } else {
    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
    options.num_col_blocks = options.num_row_blocks;
    options.min_col_block_size = options.min_row_block_size;
    options.max_col_block_size = options.max_row_block_size;
  }

  CHECK_GT(options.block_density, 0.0);
  CHECK_LE(options.block_density, 1.0);

  vector<int> row_blocks;
  vector<int> col_blocks;

  // Generate the row block structure.
  for (int i = 0; i < options.num_row_blocks; ++i) {
    // Generate a random integer in [min_row_block_size, max_row_block_size].
    const int delta_block_size =
        Uniform(options.max_row_block_size - options.min_row_block_size);
    row_blocks.push_back(options.min_row_block_size + delta_block_size);
  }

  if (options.storage_type == UNSYMMETRIC) {
    // Generate the col block structure.
    for (int i = 0; i < options.num_col_blocks; ++i) {
      // Generate a random integer in [min_col_block_size, max_col_block_size].
      const int delta_block_size =
          Uniform(options.max_col_block_size - options.min_col_block_size);
      col_blocks.push_back(options.min_col_block_size + delta_block_size);
    }
  } else {
    // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
    col_blocks = row_blocks;
  }

  vector<int> tsm_rows;
  vector<int> tsm_cols;
  vector<double> tsm_values;

  // For ease of construction, we are going to generate the
  // CompressedRowSparseMatrix by generating it as a
  // TripletSparseMatrix and then converting it to a
  // CompressedRowSparseMatrix.

  // It is possible that the random matrix is empty, which is likely
  // not what the user wants, so repeat the matrix generation until we
  // have at least one non-zero entry.
  while (tsm_values.empty()) {
    tsm_rows.clear();
    tsm_cols.clear();
    tsm_values.clear();

    int row_block_begin = 0;
    for (int r = 0; r < options.num_row_blocks; ++r) {
      int col_block_begin = 0;
      for (int c = 0; c < options.num_col_blocks; ++c) {
        if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
            ((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
          col_block_begin += col_blocks[c];
          continue;
        }

        // Randomly determine if this block is present or not.
        if (RandDouble() <= options.block_density) {
          // If the matrix is symmetric, then we take care to generate
          // symmetric diagonal blocks.
          if (options.storage_type == UNSYMMETRIC || r != c) {
            AddRandomBlock(row_blocks[r],
                           col_blocks[c],
                           row_block_begin,
                           col_block_begin,
                           &tsm_rows,
                           &tsm_cols,
                           &tsm_values);
          } else {
            AddSymmetricRandomBlock(row_blocks[r],
                                    row_block_begin,
                                    &tsm_rows,
                                    &tsm_cols,
                                    &tsm_values);
          }
        }
        col_block_begin += col_blocks[c];
      }
      row_block_begin += row_blocks[r];
    }
  }

  const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
  const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
  const bool kDoNotTranspose = false;
  CompressedRowSparseMatrix* matrix =
      CompressedRowSparseMatrix::FromTripletSparseMatrix(
          TripletSparseMatrix(
              num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
          kDoNotTranspose);
  (*matrix->mutable_row_blocks()) = row_blocks;
  (*matrix->mutable_col_blocks()) = col_blocks;
  matrix->set_storage_type(options.storage_type);
  return matrix;
}

}  // namespace internal
}  // namespace ceres