// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)

#include "ceres/block_jacobi_preconditioner.h"

#include "Eigen/Cholesky"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/casts.h"
#include "ceres/integral_types.h"
#include "ceres/internal/eigen.h"

namespace ceres {
namespace internal {

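// A block Jacobi preconditioner for A'A: for each column block i of A it
// stores the inverse of the corresponding diagonal block of the normal
// equations,
//
//   M_i = sum_r A_ri' A_ri   (+ D_i' D_i when a diagonal D is supplied),
//
// where A_ri is the cell of A in row block r and column block i. Applying
// the preconditioner is then a block-wise multiplication by M_i^{-1}.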
BlockJacobiPreconditioner::BlockJacobiPreconditioner(
    const BlockSparseMatrix& A)
    : num_rows_(A.num_rows()),
      block_structure_(*A.block_structure()) {
  // Calculate the amount of storage needed.
  int storage_needed = 0;
  for (int c = 0; c < block_structure_.cols.size(); ++c) {
    int size = block_structure_.cols[c].size;
    storage_needed += size * size;
  }

  // Size the offsets and storage.
  blocks_.resize(block_structure_.cols.size());
  block_storage_.resize(storage_needed);

  // Put pointers to the storage in the offsets.
  double* block_cursor = &block_storage_[0];
  for (int c = 0; c < block_structure_.cols.size(); ++c) {
    int size = block_structure_.cols[c].size;
    blocks_[c] = block_cursor;
    block_cursor += size * size;
  }
}

BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {}

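// Recomputes the diagonal blocks of A'A (plus the D'D term when D is
// non-NULL) and stores their inverses in blocks_ / block_storage_.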
bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
                                           const double* D) {
  const CompressedRowBlockStructure* bs = A.block_structure();

  // Compute the diagonal blocks by block inner products.
  std::fill(block_storage_.begin(), block_storage_.end(), 0.0);
  const double* values = A.values();
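  // Each cell of A in row block r and column block c is a row_block_size x
  // col_block_size matrix m; it contributes m' * m to the c-th diagonal block.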
  for (int r = 0; r < bs->rows.size(); ++r) {
    const int row_block_size = bs->rows[r].block.size;
    const vector<Cell>& cells = bs->rows[r].cells;
    for (int c = 0; c < cells.size(); ++c) {
      const int col_block_size = bs->cols[cells[c].block_id].size;
      ConstMatrixRef m(values + cells[c].position,
                       row_block_size,
                       col_block_size);

      MatrixRef(blocks_[cells[c].block_id],
                col_block_size,
                col_block_size).noalias() += m.transpose() * m;

      // TODO(keir): Figure out when the below expression is actually faster
      // than doing the full rank update. The issue is that for smaller sizes,
      // the rankUpdate() function is slower than the full product done above.
      //
      // On the typical bundling problems, the above product is ~5% faster.
      //
      //   MatrixRef(blocks_[cells[c].block_id],
      //             col_block_size,
      //             col_block_size)
      //      .selfadjointView<Eigen::Upper>()
      //      .rankUpdate(m);
      //
    }
  }

  // Add the diagonal and invert each block.
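  // When D is non-NULL, its squared entries are added to the block diagonal,
  // so each block becomes the corresponding diagonal block of A'A + D'D. The
  // block is then replaced in place by its inverse, computed by solving
  // against the identity with a dense Cholesky (LLT) factorization.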
  for (int c = 0; c < bs->cols.size(); ++c) {
    const int size = block_structure_.cols[c].size;
    const int position = block_structure_.cols[c].position;
    MatrixRef block(blocks_[c], size, size);

    if (D != NULL) {
      block.diagonal() +=
          ConstVectorRef(D + position, size).array().square().matrix();
    }

    block = block.selfadjointView<Eigen::Upper>()
                 .llt()
                 .solve(Matrix::Identity(size, size));
  }
  return true;
}

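// Applies the preconditioner block by block: y += M^{-1} x, where M is the
// block diagonal matrix assembled in UpdateImpl().
//
// A rough usage sketch (the names here are illustrative, not part of this
// file); note that y is accumulated into, so it must be zeroed first:
//
//   Vector z = Vector::Zero(num_cols);
//   preconditioner.RightMultiply(r.data(), z.data());  // z = M^{-1} r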
void BlockJacobiPreconditioner::RightMultiply(const double* x,
                                              double* y) const {
  for (int c = 0; c < block_structure_.cols.size(); ++c) {
    const int size = block_structure_.cols[c].size;
    const int position = block_structure_.cols[c].position;
    ConstMatrixRef D(blocks_[c], size, size);
    ConstVectorRef x_block(x + position, size);
    VectorRef y_block(y + position, size);
    y_block += D * x_block;
  }
}

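// Each stored block is the inverse of a symmetric matrix, so the
// preconditioner itself is symmetric and left multiplication is identical to
// right multiplication.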
void BlockJacobiPreconditioner::LeftMultiply(const double* x, double* y) const {
  RightMultiply(x, y);
}

}  // namespace internal
}  // namespace ceres