// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// A simple C++ interface to the SuiteSparse and CHOLMOD libraries.

#ifndef CERES_INTERNAL_SUITESPARSE_H_
#define CERES_INTERNAL_SUITESPARSE_H_

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#ifndef CERES_NO_SUITESPARSE

#include <cstring>
#include <memory>
#include <string>
#include <vector>

#include "SuiteSparseQR.hpp"
#include "ceres/linear_solver.h"
#include "ceres/sparse_cholesky.h"
#include "cholmod.h"
#include "glog/logging.h"

// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
// if SuiteSparse was compiled with Metis support. This makes
// calling and linking into cholmod_camd problematic even though it
// has nothing to do with Metis. This has been fixed reliably in
// 4.2.0.
//
// The fix was actually committed in 4.1.0, but there is
// some confusion about a silent update to the tar ball, so we are
// being conservative and choosing the next minor version where
// things are stable.
#if (SUITESPARSE_VERSION < 4002)
#define CERES_NO_CAMD
#endif

// UF_long is deprecated but SuiteSparse_long is only available in
// newer versions of SuiteSparse. So for older versions of
// SuiteSparse, we define SuiteSparse_long to be the same as UF_long,
// which is what recent versions of SuiteSparse do anyways.
#ifndef SuiteSparse_long
#define SuiteSparse_long UF_long
#endif

namespace ceres {
namespace internal {

class CompressedRowSparseMatrix;
class TripletSparseMatrix;

// The raw CHOLMOD and SuiteSparseQR libraries have a slightly
// cumbersome C-like calling format. This object abstracts it away and
// provides the user with a simpler interface. The methods here cannot
// be static as a cholmod_common object serves as a global variable
// for all cholmod function calls.
class SuiteSparse {
 public:
  SuiteSparse();
  ~SuiteSparse();

  // Functions for building cholmod_sparse objects from sparse
  // matrices stored in triplet form. The matrix A is not
  // modified. Caller owns the result.
  cholmod_sparse* CreateSparseMatrix(TripletSparseMatrix* A);

  // This function works like CreateSparseMatrix, except that the
  // return value corresponds to A' rather than A.
  cholmod_sparse* CreateSparseMatrixTranspose(TripletSparseMatrix* A);

  // Create a cholmod_sparse wrapper around the contents of A. This is
  // a shallow object, which refers to the contents of A and does not
  // use the SuiteSparse machinery to allocate memory.
  cholmod_sparse CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);

  // Create a cholmod_dense vector around the contents of the array x.
  // This is a shallow object, which refers to the contents of x and
  // does not use the SuiteSparse machinery to allocate memory.
  cholmod_dense CreateDenseVectorView(const double* x, int size);

  // Given a vector x, build a cholmod_dense vector of size out_size
  // with the first in_size entries copied from x. If x is NULL, then
  // an all zeros vector is returned. Caller owns the result.
  cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);
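  //
  // For example (an illustrative sketch, not part of the original
  // documentation):
  //
  //   double x[3] = {1.0, 2.0, 3.0};
  //   cholmod_dense* y = CreateDenseVector(x, 3, 5);
  //   // y now holds {1.0, 2.0, 3.0, 0.0, 0.0}; the caller must Free(y).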

  // The matrix A is scaled using the matrix whose diagonal is the
  // vector scale. mode describes how scaling is applied. Possible
  // values are CHOLMOD_ROW for row scaling - diag(scale) * A,
  // CHOLMOD_COL for column scaling - A * diag(scale) and CHOLMOD_SYM
  // for symmetric scaling which scales both the rows and the columns
  // - diag(scale) * A * diag(scale).
  void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
    cholmod_scale(scale, mode, A, &cc_);
  }
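  // As a rough worked example of the modes above (illustrative only):
  // if scale holds s = (2, 3) and A is 2x2, then
  //
  //   Scale(scale, CHOLMOD_ROW, A);  // row i of A is multiplied by s[i].
  //   Scale(scale, CHOLMOD_COL, A);  // column j of A is multiplied by s[j].
  //   Scale(scale, CHOLMOD_SYM, A);  // A(i, j) becomes s[i] * A(i, j) * s[j].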

  // Create and return a matrix m = A * A'. Caller owns the
  // result. The matrix A is not modified.
  cholmod_sparse* AATranspose(cholmod_sparse* A) {
    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
    m->stype = 1;  // Pay attention to the upper triangular part.
    return m;
  }

  // y = alpha * A * x + beta * y. Only y is modified.
  void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
                           cholmod_dense* x, cholmod_dense* y) {
    double alpha_[2] = {alpha, 0};
    double beta_[2] = {beta, 0};
    cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
  }
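  // For instance (an illustrative sketch, not part of the original
  // documentation), the residual r = b - A * x can be formed by copying
  // b into a cholmod_dense vector r and then calling
  //
  //   SparseDenseMultiply(A, -1.0, 1.0, x, r);
  //
  // which evaluates r = -1.0 * A * x + 1.0 * r in place.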

  // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
  // the fill-in in the Cholesky factorization of the corresponding
  // matrix. This is done by using the AMD algorithm.
  //
  // Using this ordering, the symbolic Cholesky factorization of A (or
  // AA') is computed and returned.
  //
  // A is not modified. Only the pattern of non-zeros of A is used;
  // the actual numerical values in A are of no consequence.
  //
  // message contains an explanation of the failures, if any.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A, std::string* message);

  cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
                                       const std::vector<int>& row_blocks,
                                       const std::vector<int>& col_blocks,
                                       std::string* message);

  // If A is symmetric, then compute the symbolic Cholesky
  // factorization of A(ordering, ordering). If A is unsymmetric, then
  // compute the symbolic factorization of
  // A(ordering,:) A(ordering,:)'.
  //
  // A is not modified. Only the pattern of non-zeros of A is used;
  // the actual numerical values in A are of no consequence.
  //
  // message contains an explanation of the failures, if any.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholeskyWithUserOrdering(
      cholmod_sparse* A,
      const std::vector<int>& ordering,
      std::string* message);

  // Perform a symbolic factorization of A without re-ordering A. No
  // postordering of the elimination tree is performed. This ensures
  // that the symbolic factor does not introduce an extra permutation
  // on the matrix. See the documentation for CHOLMOD for more details.
  //
  // message contains an explanation of the failures, if any.
  cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A,
                                                     std::string* message);

  // Use the symbolic factorization in L to find the numerical
  // factorization for the matrix A or AA^T. The return value
  // indicates whether the factorization succeeded; L contains the
  // numeric factorization on return.
  //
  // message contains an explanation of the failures, if any.
  LinearSolverTerminationType Cholesky(cholmod_sparse* A,
                                       cholmod_factor* L,
                                       std::string* message);

  // Given a Cholesky factorization of a matrix A = LL^T, solve the
  // linear system Ax = b, and return the result. If the solve fails,
  // NULL is returned. Caller owns the result.
  //
  // message contains an explanation of the failures, if any.
  cholmod_dense* Solve(cholmod_factor* L,
                       cholmod_dense* b,
                       std::string* message);
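  //
  // A rough end-to-end sketch of how these pieces fit together (an
  // illustrative example rather than a prescribed recipe; A_triplet, b
  // and n are placeholders, and error handling is elided):
  //
  //   SuiteSparse ss;
  //   std::string message;
  //   cholmod_sparse* A = ss.CreateSparseMatrix(A_triplet);
  //   cholmod_factor* L = ss.AnalyzeCholesky(A, &message);
  //   if (L != NULL &&
  //       ss.Cholesky(A, L, &message) == LINEAR_SOLVER_SUCCESS) {
  //     cholmod_dense* rhs = ss.CreateDenseVector(b, n, n);
  //     cholmod_dense* solution = ss.Solve(L, rhs, &message);
  //     // ... use solution ...
  //     ss.Free(solution);
  //     ss.Free(rhs);
  //   }
  //   ss.Free(L);
  //   ss.Free(A);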

  // By virtue of the modeling layer in Ceres being block oriented,
  // all the matrices used by Ceres are also block oriented. When
  // doing sparse direct factorization of these matrices, the
  // fill-reducing ordering algorithms (in particular AMD) can either
  // be run on the block or the scalar form of these matrices. The two
  // SuiteSparse::AnalyzeCholesky methods allow the client to
  // compute the symbolic factorization of a matrix by either using
  // AMD on the matrix or a user provided ordering of the rows.
  //
  // But since the underlying matrices are block oriented, it is worth
  // running AMD on just the block structure of these matrices and then
  // lifting these block orderings to a full scalar ordering. This
  // preserves the block structure of the permuted matrix, and exposes
  // more of the super-nodal structure of the matrix to the numerical
  // factorization routines.
  //
  // Find the block oriented AMD ordering of a matrix A, whose row and
  // column blocks are given by row_blocks, and col_blocks
  // respectively. The matrix may or may not be symmetric. The entries
  // of col_blocks do not need to sum to the number of columns in
  // A. If they do not, only the first sum(col_blocks) columns are used
  // to compute the ordering.
  bool BlockAMDOrdering(const cholmod_sparse* A,
                        const std::vector<int>& row_blocks,
                        const std::vector<int>& col_blocks,
                        std::vector<int>* ordering);
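  //
  // For example (a hypothetical illustration): for a symmetric matrix
  // assembled from parameter blocks of sizes 3, 3 and 4, one would pass
  // row_blocks == col_blocks == {3, 3, 4}; AMD is then run on the 3x3
  // block sparsity pattern and the resulting block permutation is
  // lifted to a permutation of all 10 scalar rows/columns.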

  // Find a fill reducing approximate minimum degree
  // ordering. ordering is expected to be large enough to hold the
  // ordering.
  bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);

  // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
  // if SuiteSparse was compiled with Metis support. This makes
  // calling and linking into cholmod_camd problematic even though it
  // has nothing to do with Metis. This has been fixed reliably in
  // 4.2.0.
  //
  // The fix was actually committed in 4.1.0, but there is
  // some confusion about a silent update to the tar ball, so we are
  // being conservative and choosing the next minor version where
  // things are stable.
  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
    return (SUITESPARSE_VERSION > 4001);
  }

  // Find a fill reducing approximate minimum degree
  // ordering. constraints is an array which associates with each
  // column of the matrix an elimination group, i.e., all columns in
  // group 0 are eliminated first, all columns in group 1 are
  // eliminated next, etc. This function finds a fill reducing ordering
  // that obeys these constraints.
  //
  // Calling ApproximateMinimumDegreeOrdering is equivalent to calling
  // ConstrainedApproximateMinimumDegreeOrdering with a constraint
  // array that puts all columns in the same elimination group.
  //
  // If CERES_NO_CAMD is defined then calling this function will
  // result in a crash.
  bool ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
                                                   int* constraints,
                                                   int* ordering);
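  //
  // A small hypothetical example: for a matrix with five columns,
  //
  //   int constraints[5] = {0, 0, 1, 1, 1};
  //
  // requests an ordering in which the first two columns are eliminated
  // before the remaining three, while still reducing fill within each
  // elimination group.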

  void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
  void Free(cholmod_dense* m) { cholmod_free_dense(&m, &cc_); }
  void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }

  void Print(cholmod_sparse* m, const std::string& name) {
    cholmod_print_sparse(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_dense* m, const std::string& name) {
    cholmod_print_dense(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_triplet* m, const std::string& name) {
    cholmod_print_triplet(m, const_cast<char*>(name.c_str()), &cc_);
  }

  cholmod_common* mutable_cc() { return &cc_; }

 private:
  cholmod_common cc_;
};

class SuiteSparseCholesky : public SparseCholesky {
 public:
  static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);

  // SparseCholesky interface.
  virtual ~SuiteSparseCholesky();
  virtual CompressedRowSparseMatrix::StorageType StorageType() const;
  virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
                                                std::string* message);
  virtual LinearSolverTerminationType Solve(const double* rhs,
                                            double* solution,
                                            std::string* message);

 private:
  SuiteSparseCholesky(const OrderingType ordering_type);

  const OrderingType ordering_type_;
  SuiteSparse ss_;
  cholmod_factor* factor_;
};
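
// A hedged usage sketch of the SparseCholesky interface above. Here lhs,
// rhs and solution are placeholders, and the AMD enumerator of
// OrderingType (from ceres/linear_solver.h) is an assumption about the
// surrounding code rather than something documented in this file:
//
//   std::unique_ptr<SparseCholesky> cholesky =
//       SuiteSparseCholesky::Create(AMD);
//   std::string message;
//   if (cholesky->Factorize(lhs, &message) == LINEAR_SOLVER_SUCCESS &&
//       cholesky->Solve(rhs, solution, &message) == LINEAR_SOLVER_SUCCESS) {
//     // solution now contains the result.
//   }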

}  // namespace internal
}  // namespace ceres

#else  // CERES_NO_SUITESPARSE

typedef void cholmod_factor;

namespace ceres {
namespace internal {

class SuiteSparse {
 public:
  // Defining this static function even when SuiteSparse is not
  // available allows client code to check for the presence of CAMD
  // without checking for the absence of the CERES_NO_CAMD symbol.
  //
  // This is safer because the symbol may be missing due to a user
  // accidentally not including suitesparse.h in their code when
  // checking for the symbol.
  static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
    return false;
  }

  void Free(void* arg) {}
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_NO_SUITESPARSE

#endif  // CERES_INTERNAL_SUITESPARSE_H_