// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2014 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/reorder_program.h"

#include <algorithm>
#include <numeric>
#include <vector>

#include "ceres/cxsparse.h"
#include "ceres/internal/port.h"
#include "ceres/ordered_groups.h"
#include "ceres/parameter_block.h"
#include "ceres/parameter_block_ordering.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/solver.h"
#include "ceres/suitesparse.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {

// Find the minimum index of any parameter block in the given residual block.
// Parameter blocks that have indices greater than num_eliminate_blocks are
// considered to have an index equal to num_eliminate_blocks.
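//
// For example (illustrative values only): with num_eliminate_blocks = 2 and a
// residual block whose non-constant parameter blocks have indices {3, 1}, the
// index 3 is clamped to 2 and the function returns 1.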
static int MinParameterBlock(const ResidualBlock* residual_block,
                             int num_eliminate_blocks) {
  int min_parameter_block_position = num_eliminate_blocks;
  for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
    ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
    if (!parameter_block->IsConstant()) {
      CHECK_NE(parameter_block->index(), -1)
          << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
          << "This is a Ceres bug; please contact the developers!";
      min_parameter_block_position = std::min(parameter_block->index(),
                                              min_parameter_block_position);
    }
  }
  return min_parameter_block_position;
}

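// Compute a fill-reducing ordering of the parameter blocks using SuiteSparse,
// given the block sparsity pattern of the transpose of the Jacobian. If CAMD
// is available and the user supplied an ordering with more than one group,
// the group ids are used as ordering constraints; otherwise plain AMD is used.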
void OrderingForSparseNormalCholeskyUsingSuiteSparse(
    const TripletSparseMatrix& tsm_block_jacobian_transpose,
    const vector<ParameterBlock*>& parameter_blocks,
    const ParameterBlockOrdering& parameter_block_ordering,
    int* ordering) {
#ifdef CERES_NO_SUITESPARSE
  LOG(FATAL) << "Congratulations, you found a Ceres bug! "
             << "Please report this error to the developers.";
#else
  SuiteSparse ss;
  cholmod_sparse* block_jacobian_transpose =
      ss.CreateSparseMatrix(
          const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));

  // If CAMD is not available, or the user did not supply a useful
  // ordering, then just use regular AMD.
  if (parameter_block_ordering.NumGroups() <= 1 ||
      !SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
    ss.ApproximateMinimumDegreeOrdering(block_jacobian_transpose, &ordering[0]);
  } else {
    vector<int> constraints;
    for (int i = 0; i < parameter_blocks.size(); ++i) {
      constraints.push_back(
          parameter_block_ordering.GroupId(
              parameter_blocks[i]->mutable_user_state()));
    }
    ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
                                                   &constraints[0],
                                                   ordering);
  }

  ss.Free(block_jacobian_transpose);
#endif  // CERES_NO_SUITESPARSE
}

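// Compute a fill-reducing ordering of the parameter blocks using CXSparse.
// Since CXSparse orders J'J rather than J', the block sparsity of J'J is
// formed explicitly before computing an approximate minimum degree ordering.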
void OrderingForSparseNormalCholeskyUsingCXSparse(
    const TripletSparseMatrix& tsm_block_jacobian_transpose,
    int* ordering) {
#ifdef CERES_NO_CXSPARSE
  LOG(FATAL) << "Congratulations, you found a Ceres bug! "
             << "Please report this error to the developers.";
#else  // CERES_NO_CXSPARSE
  // CXSparse works with J'J instead of J'. So compute the block
  // sparsity for J'J and compute an approximate minimum degree
  // ordering.
  CXSparse cxsparse;
  cs_di* block_jacobian_transpose;
  block_jacobian_transpose =
      cxsparse.CreateSparseMatrix(
          const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
  cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
  cs_di* block_hessian =
      cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
  cxsparse.Free(block_jacobian);
  cxsparse.Free(block_jacobian_transpose);

  cxsparse.ApproximateMinimumDegreeOrdering(block_hessian, ordering);
  cxsparse.Free(block_hessian);
#endif  // CERES_NO_CXSPARSE
}

}  // namespace

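// Reorder the parameter blocks of the program so that they appear in
// increasing order of their elimination group, i.e. all blocks in group 0
// first, then group 1, and so on. For example (illustrative values only): an
// ordering with groups {0: {x}, 1: {y, z}} results in x appearing before y
// and z in the program; the relative order of y and z is determined by the
// iteration order of the underlying set.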
bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
                   const ParameterBlockOrdering& ordering,
                   Program* program,
                   string* error) {
  const int num_parameter_blocks = program->NumParameterBlocks();
  if (ordering.NumElements() != num_parameter_blocks) {
    *error = StringPrintf("User specified ordering does not have the same "
                          "number of parameters as the problem. The problem "
                          "has %d blocks while the ordering has %d blocks.",
                          num_parameter_blocks,
                          ordering.NumElements());
    return false;
  }

  vector<ParameterBlock*>* parameter_blocks =
      program->mutable_parameter_blocks();
  parameter_blocks->clear();

  const map<int, set<double*> >& groups =
      ordering.group_to_elements();

  for (map<int, set<double*> >::const_iterator group_it = groups.begin();
       group_it != groups.end();
       ++group_it) {
    const set<double*>& group = group_it->second;
    for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
         parameter_block_ptr_it != group.end();
         ++parameter_block_ptr_it) {
      ProblemImpl::ParameterMap::const_iterator parameter_block_it =
          parameter_map.find(*parameter_block_ptr_it);
      if (parameter_block_it == parameter_map.end()) {
        *error = StringPrintf("User specified ordering contains a pointer "
                              "to a double that is not a parameter block in "
                              "the problem. The invalid double is in group: %d",
                              group_it->first);
        return false;
      }
      parameter_blocks->push_back(parameter_block_it->second);
    }
  }
  return true;
}

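// Reorder the residual blocks so that they are grouped by the minimum index
// of the E-blocks (the first num_eliminate_blocks parameter blocks) that they
// depend on; residuals that depend on no E-block come last. The grouping is a
// counting sort. For example (illustrative values only): with
// num_eliminate_blocks = 2 and minimum positions [0, 1, 0, 2, 1] for residual
// blocks r0..r4, the histogram is [2, 2, 1], the cumulative offsets are
// [2, 4, 5], and the back-to-front fill yields the order [r2, r0, r4, r1, r3].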
bool LexicographicallyOrderResidualBlocks(const int num_eliminate_blocks,
                                          Program* program,
                                          string* error) {
  CHECK_GE(num_eliminate_blocks, 1)
      << "Congratulations, you found a Ceres bug! Please report this error "
      << "to the developers.";

  // Create a histogram of the number of residuals for each E block. There is
  // an extra bucket at the end to catch all non-eliminated F blocks.
  vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1);
  vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks();
  vector<int> min_position_per_residual(residual_blocks->size());
  for (int i = 0; i < residual_blocks->size(); ++i) {
    ResidualBlock* residual_block = (*residual_blocks)[i];
    int position = MinParameterBlock(residual_block, num_eliminate_blocks);
    min_position_per_residual[i] = position;
    DCHECK_LE(position, num_eliminate_blocks);
    residual_blocks_per_e_block[position]++;
  }

  // Run a cumulative sum on the histogram, to obtain offsets to the start of
  // each histogram bucket (where each bucket is for the residuals for that
  // E-block).
  vector<int> offsets(num_eliminate_blocks + 1);
  std::partial_sum(residual_blocks_per_e_block.begin(),
                   residual_blocks_per_e_block.end(),
                   offsets.begin());
  CHECK_EQ(offsets.back(), residual_blocks->size())
      << "Congratulations, you found a Ceres bug! Please report this error "
      << "to the developers.";

  // Every E-block should have at least one residual block assigned to it.
  CHECK(find(residual_blocks_per_e_block.begin(),
             residual_blocks_per_e_block.end() - 1, 0) ==
        residual_blocks_per_e_block.end() - 1)
      << "Congratulations, you found a Ceres bug! Please report this error "
      << "to the developers.";

  // Fill in each bucket with the residual blocks for its corresponding E block.
  // Each bucket is individually filled from the back of the bucket to the front
  // of the bucket. The filling order among the buckets is dictated by the
  // residual blocks. This loop uses the offsets as counters; subtracting one
  // from each offset as a residual block is placed in the bucket. When the
  // filling is finished, the offset pointers should have shifted down one
  // entry (this is verified below).
  vector<ResidualBlock*> reordered_residual_blocks(
      (*residual_blocks).size(), static_cast<ResidualBlock*>(NULL));
  for (int i = 0; i < residual_blocks->size(); ++i) {
    int bucket = min_position_per_residual[i];

    // Decrement the cursor, which should now point at the next empty position.
    offsets[bucket]--;

    // Sanity.
    CHECK(reordered_residual_blocks[offsets[bucket]] == NULL)
        << "Congratulations, you found a Ceres bug! Please report this error "
        << "to the developers.";

    reordered_residual_blocks[offsets[bucket]] = (*residual_blocks)[i];
  }

  // Sanity check #1: The difference in bucket offsets should match the
  // histogram sizes.
  for (int i = 0; i < num_eliminate_blocks; ++i) {
    CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i])
        << "Congratulations, you found a Ceres bug! Please report this error "
        << "to the developers.";
  }
  // Sanity check #2: No NULL's left behind.
  for (int i = 0; i < reordered_residual_blocks.size(); ++i) {
    CHECK(reordered_residual_blocks[i] != NULL)
        << "Congratulations, you found a Ceres bug! Please report this error "
        << "to the developers.";
  }

  // Now that the residuals are collected by E block, swap them in place.
  swap(*program->mutable_residual_blocks(), reordered_residual_blocks);
  return true;
}

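// Reorder the columns corresponding to the Schur complement, i.e. the
// F-blocks, using SuiteSparse's constrained approximate minimum degree
// ordering (CAMD), with the elimination groups as constraints. This is a
// no-op if CAMD is not available.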
void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
    const ParameterBlockOrdering& parameter_block_ordering,
    Program* program) {
  // Pre-order the columns corresponding to the Schur complement if
  // possible.
#ifndef CERES_NO_SUITESPARSE
  SuiteSparse ss;
  if (!SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
    return;
  }

  vector<int> constraints;
  vector<ParameterBlock*>& parameter_blocks =
      *(program->mutable_parameter_blocks());

  for (int i = 0; i < parameter_blocks.size(); ++i) {
    constraints.push_back(
        parameter_block_ordering.GroupId(
            parameter_blocks[i]->mutable_user_state()));
  }

  // Renumber the entries of constraints to be contiguous integers
  // as CAMD requires that the group ids be in the range [0,
  // parameter_blocks.size() - 1].
  MapValuesToContiguousRange(constraints.size(), &constraints[0]);

  // Set the offsets and index for CreateJacobianSparsityTranspose.
  program->SetParameterOffsetsAndIndex();
  // Compute a block sparse representation of J'.
  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
      program->CreateJacobianBlockSparsityTranspose());

  cholmod_sparse* block_jacobian_transpose =
      ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());

  vector<int> ordering(parameter_blocks.size(), 0);
  ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
                                                 &constraints[0],
                                                 &ordering[0]);
  ss.Free(block_jacobian_transpose);

  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
  }
#endif
}

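// Reorder a program for use with a Schur type linear solver. The parameter
// blocks are arranged so that all E-blocks (the blocks to be eliminated)
// appear before the F-blocks, and the residual blocks are then ordered
// lexicographically by the E-block they depend on. If the user supplied only
// a single elimination group, the E-blocks are chosen automatically by
// computing a maximal independent set.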
bool ReorderProgramForSchurTypeLinearSolver(
    const LinearSolverType linear_solver_type,
    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
    const ProblemImpl::ParameterMap& parameter_map,
    ParameterBlockOrdering* parameter_block_ordering,
    Program* program,
    string* error) {
  if (parameter_block_ordering->NumGroups() == 1) {
    // If the user supplied a parameter_block_ordering with just one
    // group, it is equivalent to the user supplying NULL as a
    // parameter_block_ordering. Ceres is completely free to choose the
    // parameter block ordering as it sees fit. For Schur type solvers,
    // this means that the user wishes for Ceres to identify the
    // e_blocks, which we do by computing a maximal independent set.
    vector<ParameterBlock*> schur_ordering;
    const int num_eliminate_blocks =
        ComputeStableSchurOrdering(*program, &schur_ordering);

    CHECK_EQ(schur_ordering.size(), program->NumParameterBlocks())
        << "Congratulations, you found a Ceres bug! Please report this error "
        << "to the developers.";

    // Update the parameter_block_ordering object.
    for (int i = 0; i < schur_ordering.size(); ++i) {
      double* parameter_block = schur_ordering[i]->mutable_user_state();
      const int group_id = (i < num_eliminate_blocks) ? 0 : 1;
      parameter_block_ordering->AddElementToGroup(parameter_block, group_id);
    }

    // We could call ApplyOrdering but this is cheaper and
    // simpler.
    swap(*program->mutable_parameter_blocks(), schur_ordering);
  } else {
    // The user provided an ordering with more than one elimination
    // group. Trust the user and apply the ordering.
    if (!ApplyOrdering(parameter_map,
                       *parameter_block_ordering,
                       program,
                       error)) {
      return false;
    }
  }

  if (linear_solver_type == SPARSE_SCHUR &&
      sparse_linear_algebra_library_type == SUITE_SPARSE) {
    MaybeReorderSchurComplementColumnsUsingSuiteSparse(
        *parameter_block_ordering,
        program);
  }

  program->SetParameterOffsetsAndIndex();
  // Schur type solvers also require that their residual blocks be
  // lexicographically ordered.
  const int num_eliminate_blocks =
      parameter_block_ordering->group_to_elements().begin()->second.size();
  if (!LexicographicallyOrderResidualBlocks(num_eliminate_blocks,
                                            program,
                                            error)) {
    return false;
  }

  program->SetParameterOffsetsAndIndex();
  return true;
}

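// Reorder the parameter blocks for sparse Cholesky on the normal equations,
// using a fill-reducing (approximate minimum degree) ordering computed from
// the block sparsity pattern of the Jacobian. With Eigen's sparse backend the
// program is left in its current order, since the stable version of Eigen
// does not accept a pre-ordered matrix for symbolic analysis.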
bool ReorderProgramForSparseNormalCholesky(
    const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
    const ParameterBlockOrdering& parameter_block_ordering,
    Program* program,
    string* error) {
  if (sparse_linear_algebra_library_type != SUITE_SPARSE &&
      sparse_linear_algebra_library_type != CX_SPARSE &&
      sparse_linear_algebra_library_type != EIGEN_SPARSE) {
    *error = "Unknown sparse linear algebra library.";
    return false;
  }

  // For Eigen, there is nothing to do. This is because Eigen in its
  // current stable version does not expose a method for doing
  // symbolic analysis on pre-ordered matrices, so a block
  // pre-ordering is a bit pointless.
  //
  // The dev version as recently as July 20, 2014 has support for
  // pre-ordering. Once this becomes more widespread, or we add
  // support for detecting Eigen versions, we can add support for this
  // along the lines of CXSparse.
  if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
    program->SetParameterOffsetsAndIndex();
    return true;
  }

  // Set the offsets and index for CreateJacobianSparsityTranspose.
  program->SetParameterOffsetsAndIndex();
  // Compute a block sparse representation of J'.
  scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
      program->CreateJacobianBlockSparsityTranspose());

  vector<int> ordering(program->NumParameterBlocks(), 0);
  vector<ParameterBlock*>& parameter_blocks =
      *(program->mutable_parameter_blocks());

  if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
    OrderingForSparseNormalCholeskyUsingSuiteSparse(
        *tsm_block_jacobian_transpose,
        parameter_blocks,
        parameter_block_ordering,
        &ordering[0]);
  } else if (sparse_linear_algebra_library_type == CX_SPARSE) {
    OrderingForSparseNormalCholeskyUsingCXSparse(
        *tsm_block_jacobian_transpose,
        &ordering[0]);
  }

  // Apply ordering.
  const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
  for (int i = 0; i < program->NumParameterBlocks(); ++i) {
    parameter_blocks[i] = parameter_blocks_copy[ordering[i]];
  }

  program->SetParameterOffsetsAndIndex();
  return true;
}

}  // namespace internal
}  // namespace ceres