blob: dc02986427c42ad995bcb57c3bad696e5af9c8c0 [file] [log] [blame]
Austin Schuh70cc9552019-01-21 19:46:48 -08001// Ceres Solver - A fast non-linear least squares minimizer
2// Copyright 2018 Google Inc. All rights reserved.
3// http://ceres-solver.org/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are met:
7//
8// * Redistributions of source code must retain the above copyright notice,
9// this list of conditions and the following disclaimer.
10// * Redistributions in binary form must reproduce the above copyright notice,
11// this list of conditions and the following disclaimer in the documentation
12// and/or other materials provided with the distribution.
13// * Neither the name of Google Inc. nor the names of its contributors may be
14// used to endorse or promote products derived from this software without
15// specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
18// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
21// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27// POSSIBILITY OF SUCH DAMAGE.
28//
29// Author: alexs.mac@gmail.com (Alex Stewart)
30
31// This include must come before any #ifndef check on Ceres compile options.
32#include "ceres/internal/port.h"
33
34#ifndef CERES_NO_ACCELERATE_SPARSE
35
36#include "ceres/accelerate_sparse.h"
37
38#include <algorithm>
39#include <string>
40#include <vector>
41
42#include "ceres/compressed_col_sparse_matrix_utils.h"
43#include "ceres/compressed_row_sparse_matrix.h"
44#include "ceres/triplet_sparse_matrix.h"
45#include "glog/logging.h"
46
47#define CASESTR(x) case x: return #x
48
49namespace ceres {
50namespace internal {
51
52const char* SparseStatusToString(SparseStatus_t status) {
53 switch (status) {
54 CASESTR(SparseStatusOK);
55 CASESTR(SparseFactorizationFailed);
56 CASESTR(SparseMatrixIsSingular);
57 CASESTR(SparseInternalError);
58 CASESTR(SparseParameterError);
59 CASESTR(SparseStatusReleased);
60 default:
61 return "UKNOWN";
62 }
63}
64
// Solves the factorized system in place: on entry *rhs_and_solution holds the
// right-hand side, on return it holds the solution. Requires that
// *numeric_factor was produced by a prior successful Cholesky() call.
template<typename Scalar>
void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
                                     DenseVector* rhs_and_solution) {
  SparseSolve(*numeric_factor, *rhs_and_solution);
}
70
71template<typename Scalar>
72typename AccelerateSparse<Scalar>::ASSparseMatrix
73AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
74 CompressedRowSparseMatrix* A) {
75 // Accelerate uses CSC as its sparse storage format whereas Ceres uses CSR.
76 // As this method returns the transpose view we can flip rows/cols to map
77 // from CSR to CSC^T.
78 //
79 // Accelerate's columnStarts is a long*, not an int*. These types might be
80 // different (e.g. ARM on iOS) so always make a copy.
81 column_starts_.resize(A->num_rows() +1); // +1 for final column length.
82 std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
83
84 ASSparseMatrix At;
85 At.structure.rowCount = A->num_cols();
86 At.structure.columnCount = A->num_rows();
87 At.structure.columnStarts = &column_starts_[0];
88 At.structure.rowIndices = A->mutable_cols();
89 At.structure.attributes.transpose = false;
90 At.structure.attributes.triangle = SparseUpperTriangle;
91 At.structure.attributes.kind = SparseSymmetric;
92 At.structure.attributes._reserved = 0;
93 At.structure.attributes._allocatedBySparse = 0;
94 At.structure.blockSize = 1;
95 if (std::is_same<Scalar, double>::value) {
96 At.data = reinterpret_cast<Scalar*>(A->mutable_values());
97 } else {
98 values_ =
99 ConstVectorRef(A->values(), A->num_nonzeros()).template cast<Scalar>();
100 At.data = values_.data();
101 }
102 return At;
103}
104
// Performs the symbolic (pattern-only) analysis phase of a Cholesky
// factorization of A. The result can be reused across repeated numeric
// factorizations of matrices with the same sparsity pattern.
template<typename Scalar>
typename AccelerateSparse<Scalar>::SymbolicFactorization
AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
  return SparseFactor(SparseFactorizationCholesky, A->structure);
}
110
// Computes a fresh numeric Cholesky factorization of A using the previously
// computed symbolic analysis in *symbolic_factor. Allocates new storage for
// the factors; use the refactor overload to reuse existing storage.
template<typename Scalar>
typename AccelerateSparse<Scalar>::NumericFactorization
AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
                                   SymbolicFactorization* symbolic_factor) {
  return SparseFactor(*symbolic_factor, *A);
}
117
118template<typename Scalar>
119void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
120 NumericFactorization* numeric_factor) {
121 return SparseRefactor(*A, numeric_factor);
122}
123
// Explicitly instantiate only the scalar types that are required/supported,
// so that the member function definitions can live in this .cc file.
template class AccelerateSparse<double>;
template class AccelerateSparse<float>;
128
129template<typename Scalar>
130std::unique_ptr<SparseCholesky>
131AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
132 return std::unique_ptr<SparseCholesky>(
133 new AppleAccelerateCholesky<Scalar>(ordering_type));
134}
135
// Stores the requested fill-reducing ordering type. Factorization state is
// created lazily on the first call to Factorize().
template<typename Scalar>
AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
    const OrderingType ordering_type)
    : ordering_type_(ordering_type) {}
140
// Releases the Accelerate-owned symbolic and numeric factorization resources
// (no-ops if the corresponding factorization was never computed).
template<typename Scalar>
AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
  FreeSymbolicFactorization();
  FreeNumericFactorization();
}
146
// The storage layout Factorize() expects for its input: the lower triangle of
// the symmetric lhs in CSR form (which is the upper triangle of its CSC
// transpose view handed to Accelerate).
template<typename Scalar>
CompressedRowSparseMatrix::StorageType
AppleAccelerateCholesky<Scalar>::StorageType() const {
  return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
}
152
153template<typename Scalar>
154LinearSolverTerminationType
155AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
156 std::string* message) {
157 CHECK_EQ(lhs->storage_type(), StorageType());
158 if (lhs == NULL) {
159 *message = "Failure: Input lhs is NULL.";
160 return LINEAR_SOLVER_FATAL_ERROR;
161 }
162 typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
163 as_.CreateSparseMatrixTransposeView(lhs);
164
165 if (!symbolic_factor_) {
166 symbolic_factor_.reset(
167 new typename SparseTypesTrait<Scalar>::SymbolicFactorization(
168 as_.AnalyzeCholesky(&as_lhs)));
169 if (symbolic_factor_->status != SparseStatusOK) {
170 *message = StringPrintf(
171 "Apple Accelerate Failure : Symbolic factorisation failed: %s",
172 SparseStatusToString(symbolic_factor_->status));
173 FreeSymbolicFactorization();
174 return LINEAR_SOLVER_FATAL_ERROR;
175 }
176 }
177
178 if (!numeric_factor_) {
179 numeric_factor_.reset(
180 new typename SparseTypesTrait<Scalar>::NumericFactorization(
181 as_.Cholesky(&as_lhs, symbolic_factor_.get())));
182 } else {
183 // Recycle memory from previous numeric factorization.
184 as_.Cholesky(&as_lhs, numeric_factor_.get());
185 }
186 if (numeric_factor_->status != SparseStatusOK) {
187 *message = StringPrintf(
188 "Apple Accelerate Failure : Numeric factorisation failed: %s",
189 SparseStatusToString(numeric_factor_->status));
190 FreeNumericFactorization();
191 return LINEAR_SOLVER_FAILURE;
192 }
193
194 return LINEAR_SOLVER_SUCCESS;
195}
196
197template<typename Scalar>
198LinearSolverTerminationType
199AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
200 double* solution,
201 std::string* message) {
202 CHECK_EQ(numeric_factor_->status, SparseStatusOK)
203 << "Solve called without a call to Factorize first ("
204 << SparseStatusToString(numeric_factor_->status) << ").";
205 const int num_cols = numeric_factor_->symbolicFactorization.columnCount;
206
207 typename SparseTypesTrait<Scalar>::DenseVector as_rhs_and_solution;
208 as_rhs_and_solution.count = num_cols;
209 if (std::is_same<Scalar, double>::value) {
210 as_rhs_and_solution.data = reinterpret_cast<Scalar*>(solution);
211 std::copy_n(rhs, num_cols, solution);
212 } else {
213 scalar_rhs_and_solution_ =
214 ConstVectorRef(rhs, num_cols).template cast<Scalar>();
215 as_rhs_and_solution.data = scalar_rhs_and_solution_.data();
216 }
217 as_.Solve(numeric_factor_.get(), &as_rhs_and_solution);
218 if (!std::is_same<Scalar, double>::value) {
219 VectorRef(solution, num_cols) =
220 scalar_rhs_and_solution_.template cast<double>();
221 }
222 return LINEAR_SOLVER_SUCCESS;
223}
224
225template<typename Scalar>
226void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
227 if (symbolic_factor_) {
228 SparseCleanup(*symbolic_factor_);
229 symbolic_factor_.reset();
230 }
231}
232
233template<typename Scalar>
234void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
235 if (numeric_factor_) {
236 SparseCleanup(*numeric_factor_);
237 numeric_factor_.reset();
238 }
239}
240
// Explicitly instantiate only the scalar types that are required/supported,
// so that the member function definitions can live in this .cc file.
template class AppleAccelerateCholesky<double>;
template class AppleAccelerateCholesky<float>;
245
246}
247}
248
249#endif // CERES_NO_ACCELERATE_SPARSE