// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"

#include "ceres/sparse_normal_cholesky_solver.h"

#include <algorithm>
#include <cstring>
#include <ctime>

#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/cxsparse.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/suitesparse.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
#include "Eigen/SparseCore"

namespace ceres {
namespace internal {

SparseNormalCholeskySolver::SparseNormalCholeskySolver(
    const LinearSolver::Options& options)
    : factor_(NULL),
      cxsparse_factor_(NULL),
      options_(options) {
}

void SparseNormalCholeskySolver::FreeFactorization() {
  if (factor_ != NULL) {
    ss_.Free(factor_);
    factor_ = NULL;
  }

  if (cxsparse_factor_ != NULL) {
    cxsparse_.Free(cxsparse_factor_);
    cxsparse_factor_ = NULL;
  }
}

SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
  FreeFactorization();
}

LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
    CompressedRowSparseMatrix* A,
    const double* b,
    const LinearSolver::PerSolveOptions& per_solve_options,
    double* x) {
  const int num_cols = A->num_cols();
  VectorRef(x, num_cols).setZero();
  A->LeftMultiply(b, x);
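  // x now holds the right hand side of the normal equations, A' b.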

  if (per_solve_options.D != NULL) {
    // Temporarily append a diagonal block to the A matrix, but undo
    // it before returning the matrix to the user.
    scoped_ptr<CompressedRowSparseMatrix> regularizer;
    if (A->col_blocks().size() > 0) {
      regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
          per_solve_options.D, A->col_blocks()));
    } else {
      regularizer.reset(new CompressedRowSparseMatrix(
          per_solve_options.D, num_cols));
    }
    A->AppendRows(*regularizer);
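    // Appending D as extra rows means that the matrix factored below
    // is A'A + D'D, i.e. the regularized normal equations, while the
    // right hand side A'b computed above is unchanged, since the rows
    // appended to b would all be zero.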
  }

  LinearSolver::Summary summary;
  switch (options_.sparse_linear_algebra_library_type) {
    case SUITE_SPARSE:
      summary = SolveImplUsingSuiteSparse(A, per_solve_options, x);
      break;
    case CX_SPARSE:
      summary = SolveImplUsingCXSparse(A, per_solve_options, x);
      break;
    case EIGEN_SPARSE:
      summary = SolveImplUsingEigen(A, per_solve_options, x);
      break;
    default:
      LOG(FATAL) << "Unknown sparse linear algebra library: "
                 << options_.sparse_linear_algebra_library_type;
  }

  if (per_solve_options.D != NULL) {
    A->DeleteRows(num_cols);
  }

  return summary;
}

LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingEigen(
    CompressedRowSparseMatrix* A,
    const LinearSolver::PerSolveOptions& per_solve_options,
    double* rhs_and_solution) {
#ifndef CERES_USE_EIGEN_SPARSE

  LinearSolver::Summary summary;
  summary.num_iterations = 0;
  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
  summary.message =
      "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
      "because Ceres was not built with support for "
      "Eigen's SimplicialLDLT decomposition. "
      "This requires building Ceres with -DEIGENSPARSE=ON.";
  return summary;

#else

  EventLogger event_logger("SparseNormalCholeskySolver::Eigen::Solve");

  LinearSolver::Summary summary;
  summary.num_iterations = 1;
  summary.termination_type = LINEAR_SOLVER_SUCCESS;
  summary.message = "Success.";

  // Compute the normal equations J'J delta = J'f and solve them
  // using a sparse Cholesky factorization. Notice that when compared
  // to SuiteSparse we have to explicitly compute the normal equations
  // before they can be factorized. CHOLMOD/SuiteSparse on the other
  // hand can just work off of Jt to compute the Cholesky
  // factorization of the normal equations.
  //
  // TODO(sameeragarwal): See the note below about how this may be a
  // bad idea for dynamic sparsity.
  if (outer_product_.get() == NULL) {
    outer_product_.reset(
        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
            *A, &pattern_));
  }

  CompressedRowSparseMatrix::ComputeOuterProduct(
      *A, pattern_, outer_product_.get());

  // Map to an upper triangular column major matrix.
  //
  // outer_product_ is a compressed row sparse matrix and in lower
  // triangular form; when mapped to a compressed column sparse
  // matrix, it becomes an upper triangular matrix.
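  //
  // For example (a hypothetical 2x2 pattern): if the CRS arrays hold
  // the lower triangle
  //
  //   [a 0]
  //   [b c]
  //
  // i.e. rows = {0, 1, 3}, cols = {0, 0, 1}, values = {a, b, c}, then
  // reading the same three arrays as compressed column storage gives
  // column 0 = {a} and column 1 = {b, c}, which is the upper triangle
  //
  //   [a b]
  //   [0 c].
  //
  // Since A'A is symmetric, both triangles hold the same values.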
  Eigen::MappedSparseMatrix<double, Eigen::ColMajor> AtA(
      outer_product_->num_rows(),
      outer_product_->num_rows(),
      outer_product_->num_nonzeros(),
      outer_product_->mutable_rows(),
      outer_product_->mutable_cols(),
      outer_product_->mutable_values());

  const Vector b = VectorRef(rhs_and_solution, outer_product_->num_rows());
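  // Note that b is a copy, not a reference: rhs_and_solution is
  // overwritten with the solution below, so the right hand side must
  // not alias it.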
  if (simplicial_ldlt_.get() == NULL || options_.dynamic_sparsity) {
    simplicial_ldlt_.reset(new SimplicialLDLT);
    // This is wasteful, but right now Eigen does not expose a way to
    // do symbolic analysis with a given permutation pattern, so we
    // cannot use a block analysis of the Jacobian.
    simplicial_ldlt_->analyzePattern(AtA);
    if (simplicial_ldlt_->info() != Eigen::Success) {
      summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
      summary.message =
          "Eigen failure. Unable to find symbolic factorization.";
      return summary;
    }
  }
  event_logger.AddEvent("Analysis");

  simplicial_ldlt_->factorize(AtA);
  if (simplicial_ldlt_->info() != Eigen::Success) {
    summary.termination_type = LINEAR_SOLVER_FAILURE;
    summary.message =
        "Eigen failure. Unable to find numeric factorization.";
    return summary;
  }

  VectorRef(rhs_and_solution, outer_product_->num_rows()) =
      simplicial_ldlt_->solve(b);
  if (simplicial_ldlt_->info() != Eigen::Success) {
    summary.termination_type = LINEAR_SOLVER_FAILURE;
    summary.message =
        "Eigen failure. Unable to do triangular solve.";
    return summary;
  }

  event_logger.AddEvent("Solve");
  return summary;
#endif  // CERES_USE_EIGEN_SPARSE
}

LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
    CompressedRowSparseMatrix* A,
    const LinearSolver::PerSolveOptions& per_solve_options,
    double* rhs_and_solution) {
#ifdef CERES_NO_CXSPARSE

  LinearSolver::Summary summary;
  summary.num_iterations = 0;
  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
  summary.message =
      "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
      "because Ceres was not built with support for CXSparse. "
      "This requires building Ceres with -DCXSPARSE=ON.";

  return summary;

#else

  EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");

  LinearSolver::Summary summary;
  summary.num_iterations = 1;
  summary.termination_type = LINEAR_SOLVER_SUCCESS;
  summary.message = "Success.";

  // Compute the normal equations J'J delta = J'f and solve them
  // using a sparse Cholesky factorization. Notice that when compared
  // to SuiteSparse we have to explicitly compute the normal equations
  // before they can be factorized. CHOLMOD/SuiteSparse on the other
  // hand can just work off of Jt to compute the Cholesky
  // factorization of the normal equations.
  //
  // TODO(sameeragarwal): If dynamic sparsity is enabled, then this is
  // not a good idea performance wise, since the Jacobian has far too
  // many entries and the program will consume too much memory.
  if (outer_product_.get() == NULL) {
    outer_product_.reset(
        CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
            *A, &pattern_));
  }

  CompressedRowSparseMatrix::ComputeOuterProduct(
      *A, pattern_, outer_product_.get());
  cs_di AtA_view =
      cxsparse_.CreateSparseMatrixTransposeView(outer_product_.get());
  cs_di* AtA = &AtA_view;
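  // Because outer_product_ holds one triangular half of the symmetric
  // matrix A'A in compressed row form, reinterpreting its arrays as
  // compressed columns (the cs_di convention) yields the transpose,
  // i.e. the other triangular half of the same matrix.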

  event_logger.AddEvent("Setup");

  // Compute symbolic factorization if not available.
  if (options_.dynamic_sparsity) {
    FreeFactorization();
  }
  if (cxsparse_factor_ == NULL) {
    if (options_.use_postordering) {
      cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(AtA,
                                                        A->col_blocks(),
                                                        A->col_blocks());
    } else {
      if (options_.dynamic_sparsity) {
        cxsparse_factor_ = cxsparse_.AnalyzeCholesky(AtA);
      } else {
        cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA);
      }
    }
  }
  event_logger.AddEvent("Analysis");

  if (cxsparse_factor_ == NULL) {
    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
    summary.message =
        "CXSparse failure. Unable to find symbolic factorization.";
  } else if (!cxsparse_.SolveCholesky(AtA,
                                      cxsparse_factor_,
                                      rhs_and_solution)) {
    summary.termination_type = LINEAR_SOLVER_FAILURE;
    summary.message = "CXSparse::SolveCholesky failed.";
  }
  event_logger.AddEvent("Solve");

  return summary;
#endif
}

LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
    CompressedRowSparseMatrix* A,
    const LinearSolver::PerSolveOptions& per_solve_options,
    double* rhs_and_solution) {
#ifdef CERES_NO_SUITESPARSE

  LinearSolver::Summary summary;
  summary.num_iterations = 0;
  summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
  summary.message =
      "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
      "because Ceres was not built with support for SuiteSparse. "
      "This requires building Ceres with -DSUITESPARSE=ON.";
  return summary;

#else

  EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
  LinearSolver::Summary summary;
  summary.termination_type = LINEAR_SOLVER_SUCCESS;
  summary.num_iterations = 1;
  summary.message = "Success.";

  const int num_cols = A->num_cols();
  cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
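  // A is stored in compressed row form, so viewed through CHOLMOD's
  // compressed column convention lhs is A'. As noted in the Eigen and
  // CXSparse paths above, CHOLMOD can work directly off this view to
  // compute the Cholesky factorization of the normal equations, so no
  // explicit outer product is formed here.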
  event_logger.AddEvent("Setup");

  if (options_.dynamic_sparsity) {
    FreeFactorization();
  }
  if (factor_ == NULL) {
    if (options_.use_postordering) {
      factor_ = ss_.BlockAnalyzeCholesky(&lhs,
                                         A->col_blocks(),
                                         A->row_blocks(),
                                         &summary.message);
    } else {
      if (options_.dynamic_sparsity) {
        factor_ = ss_.AnalyzeCholesky(&lhs, &summary.message);
      } else {
        factor_ =
            ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs, &summary.message);
      }
    }
  }
  event_logger.AddEvent("Analysis");

  if (factor_ == NULL) {
    summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
    // No need to set message as it has already been set by the
    // symbolic analysis routines above.
    return summary;
  }

  summary.termination_type = ss_.Cholesky(&lhs, factor_, &summary.message);
  if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
    return summary;
  }

  cholmod_dense* rhs =
      ss_.CreateDenseVector(rhs_and_solution, num_cols, num_cols);
  cholmod_dense* solution = ss_.Solve(factor_, rhs, &summary.message);
  event_logger.AddEvent("Solve");

  ss_.Free(rhs);
  if (solution != NULL) {
    memcpy(rhs_and_solution,
           solution->x,
           num_cols * sizeof(*rhs_and_solution));
    ss_.Free(solution);
  } else {
    // No need to set message as it has already been set by the
    // numeric factorization routine above.
    summary.termination_type = LINEAR_SOLVER_FAILURE;
  }

  event_logger.AddEvent("Teardown");
  return summary;
#endif
}

}  // namespace internal
}  // namespace ceres