/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::cuda;

#if !defined HAVE_CUDA || !defined HAVE_OPENCV_CALIB3D || defined(CUDA_DISABLER)

void cv::cuda::transformPoints(const GpuMat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::cuda::projectPoints(const GpuMat&, const Mat&, const Mat&, const Mat&, const Mat&, GpuMat&, Stream&) { throw_no_cuda(); }

void cv::cuda::solvePnPRansac(const Mat&, const Mat&, const Mat&, const Mat&, Mat&, Mat&, bool, int, float, int, std::vector<int>*) { throw_no_cuda(); }

#else

namespace cv { namespace cuda { namespace device
{
    namespace transform_points
    {
        void call(const PtrStepSz<float3> src, const float* rot, const float* transl, PtrStepSz<float3> dst, cudaStream_t stream);
    }

    namespace project_points
    {
        void call(const PtrStepSz<float3> src, const float* rot, const float* transl, const float* proj, PtrStepSz<float2> dst, cudaStream_t stream);
    }

    namespace solve_pnp_ransac
    {
        int maxNumIters();

        void computeHypothesisScores(
                const int num_hypotheses, const int num_points, const float* rot_matrices,
                const float3* transl_vectors, const float3* object, const float2* image,
                const float dist_threshold, int* hypothesis_scores);
    }
}}}
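
// The declarations above are host-side entry points for CUDA kernels; their definitions
// are expected to live in this module's .cu sources (an assumption based on the usual
// OpenCV CUDA module layout, not something visible in this file).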

using namespace ::cv::cuda::device;

namespace
{
    void transformPointsCaller(const GpuMat& src, const Mat& rvec, const Mat& tvec, GpuMat& dst, cudaStream_t stream)
    {
        CV_Assert(src.rows == 1 && src.cols > 0 && src.type() == CV_32FC3);
        CV_Assert(rvec.size() == Size(3, 1) && rvec.type() == CV_32F);
        CV_Assert(tvec.size() == Size(3, 1) && tvec.type() == CV_32F);

        // Convert rotation vector into matrix
        Mat rot;
        Rodrigues(rvec, rot);

        dst.create(src.size(), src.type());
        transform_points::call(src, rot.ptr<float>(), tvec.ptr<float>(), dst, stream);
    }
}

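// Public entry point: applies the rigid transform defined by (rvec, tvec) to a 1xN row of
// CV_32FC3 points on the GPU. The usage below is an illustrative sketch, not taken from this
// file; it assumes the header declares a default Stream::Null() for the last parameter.
//
//     cv::Mat pts_host(1, 100, CV_32FC3);                      // filled by the caller
//     cv::Mat rvec = (cv::Mat_<float>(1, 3) << 0.f, 0.f, 0.f); // rotation vector (Rodrigues)
//     cv::Mat tvec = (cv::Mat_<float>(1, 3) << 1.f, 2.f, 3.f); // translation vector
//     cv::cuda::GpuMat d_pts(pts_host), d_out;
//     cv::cuda::transformPoints(d_pts, rvec, tvec, d_out);     // d_out: 1x100 CV_32FC3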
void cv::cuda::transformPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, GpuMat& dst, Stream& stream)
{
    transformPointsCaller(src, rvec, tvec, dst, StreamAccessor::getStream(stream));
}

namespace
{
    void projectPointsCaller(const GpuMat& src, const Mat& rvec, const Mat& tvec, const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, cudaStream_t stream)
    {
        CV_Assert(src.rows == 1 && src.cols > 0 && src.type() == CV_32FC3);
        CV_Assert(rvec.size() == Size(3, 1) && rvec.type() == CV_32F);
        CV_Assert(tvec.size() == Size(3, 1) && tvec.type() == CV_32F);
        CV_Assert(camera_mat.size() == Size(3, 3) && camera_mat.type() == CV_32F);
        CV_Assert(dist_coef.empty()); // Undistortion isn't supported

        // Convert rotation vector into matrix
        Mat rot;
        Rodrigues(rvec, rot);

        dst.create(src.size(), CV_32FC2);
        project_points::call(src, rot.ptr<float>(), tvec.ptr<float>(), camera_mat.ptr<float>(), dst, stream);
    }
}

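// Public entry point: projects a 1xN row of CV_32FC3 object points to CV_32FC2 image points
// using (rvec, tvec) and a 3x3 CV_32F camera matrix; dist_coef must be empty because
// distortion is not supported here. The usage below is an illustrative sketch, not taken
// from this file; it assumes the header declares a default Stream::Null() for the last parameter.
//
//     cv::Mat K = (cv::Mat_<float>(3, 3) << 800.f,   0.f, 320.f,
//                                             0.f, 800.f, 240.f,
//                                             0.f,   0.f,   1.f);
//     cv::cuda::GpuMat d_pts(pts_host), d_proj;                 // pts_host: 1xN CV_32FC3
//     cv::cuda::projectPoints(d_pts, rvec, tvec, K, cv::Mat(), d_proj);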
void cv::cuda::projectPoints(const GpuMat& src, const Mat& rvec, const Mat& tvec, const Mat& camera_mat, const Mat& dist_coef, GpuMat& dst, Stream& stream)
{
    projectPointsCaller(src, rvec, tvec, camera_mat, dist_coef, dst, StreamAccessor::getStream(stream));
}

namespace
{
    // Selects subset_size random different points from [0, num_points - 1] range
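    // (simple rejection sampling: each candidate index is redrawn until it differs from all
    // previously chosen indices; cheap enough for the tiny subset sizes used here)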
    void selectRandom(int subset_size, int num_points, std::vector<int>& subset)
    {
        subset.resize(subset_size);
        for (int i = 0; i < subset_size; ++i)
        {
            bool was;
            do
            {
                subset[i] = rand() % num_points;
                was = false;
                for (int j = 0; j < i; ++j)
                    if (subset[j] == subset[i])
                    {
                        was = true;
                        break;
                    }
            } while (was);
        }
    }

    // Computes (rotation, translation) hypotheses from small random subsets of the input data
    class TransformHypothesesGenerator : public ParallelLoopBody
    {
    public:
        TransformHypothesesGenerator(const Mat& object_, const Mat& image_, const Mat& dist_coef_,
                                     const Mat& camera_mat_, int num_points_, int subset_size_,
                                     Mat rot_matrices_, Mat transl_vectors_)
                : object(&object_), image(&image_), dist_coef(&dist_coef_), camera_mat(&camera_mat_),
                  num_points(num_points_), subset_size(subset_size_), rot_matrices(rot_matrices_),
                  transl_vectors(transl_vectors_) {}

        void operator()(const Range& range) const
        {
            // Input data for generation of the current hypothesis
            std::vector<int> subset_indices(subset_size);
            Mat_<Point3f> object_subset(1, subset_size);
            Mat_<Point2f> image_subset(1, subset_size);

            // Current hypothesis data
            Mat rot_vec(1, 3, CV_64F);
            Mat rot_mat(3, 3, CV_64F);
            Mat transl_vec(1, 3, CV_64F);

            for (int iter = range.start; iter < range.end; ++iter)
            {
                selectRandom(subset_size, num_points, subset_indices);
                for (int i = 0; i < subset_size; ++i)
                {
                    object_subset(0, i) = object->at<Point3f>(subset_indices[i]);
                    image_subset(0, i) = image->at<Point2f>(subset_indices[i]);
                }

                solvePnP(object_subset, image_subset, *camera_mat, *dist_coef, rot_vec, transl_vec);

                // Remember translation vector
                Mat transl_vec_ = transl_vectors.colRange(iter * 3, (iter + 1) * 3);
                transl_vec = transl_vec.reshape(0, 1);
                transl_vec.convertTo(transl_vec_, CV_32F);

                // Remember rotation matrix
                Rodrigues(rot_vec, rot_mat);
                Mat rot_mat_ = rot_matrices.colRange(iter * 9, (iter + 1) * 9).reshape(0, 3);
                rot_mat.convertTo(rot_mat_, CV_32F);
            }
        }

        const Mat* object;
        const Mat* image;
        const Mat* dist_coef;
        const Mat* camera_mat;
        int num_points;
        int subset_size;

        // Hypotheses storage (global)
        Mat rot_matrices;
        Mat transl_vectors;
    };
}

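// Public entry point: RANSAC-based PnP. Hypotheses are generated on the CPU by running
// solvePnP on random 4-point subsets (in parallel via parallel_for_), scored on the GPU,
// and the best-scoring (rvec, tvec) pair is returned, optionally with the inlier indices.
// Note that min_inlier_count is currently ignored (see the cast below). The usage below is
// an illustrative sketch rather than code from this file (object: 1xN CV_32FC3,
// image: 1xN CV_32FC2, K: 3x3 CV_32F camera matrix):
//
//     cv::Mat rvec, tvec;
//     std::vector<int> inlier_indices;
//     cv::cuda::solvePnPRansac(object, image, K, cv::Mat(), rvec, tvec,
//                              /*use_extrinsic_guess=*/false, /*num_iters=*/100,
//                              /*max_dist=*/8.f, /*min_inlier_count=*/100, &inlier_indices);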
void cv::cuda::solvePnPRansac(const Mat& object, const Mat& image, const Mat& camera_mat,
                              const Mat& dist_coef, Mat& rvec, Mat& tvec, bool use_extrinsic_guess,
                              int num_iters, float max_dist, int min_inlier_count,
                              std::vector<int>* inliers)
{
    (void)min_inlier_count;
    CV_Assert(object.rows == 1 && object.cols > 0 && object.type() == CV_32FC3);
    CV_Assert(image.rows == 1 && image.cols > 0 && image.type() == CV_32FC2);
    CV_Assert(object.cols == image.cols);
    CV_Assert(camera_mat.size() == Size(3, 3) && camera_mat.type() == CV_32F);
    CV_Assert(!use_extrinsic_guess); // We don't support initial guess for now
    CV_Assert(num_iters <= solve_pnp_ransac::maxNumIters());

    const int subset_size = 4;
    const int num_points = object.cols;
    CV_Assert(num_points >= subset_size);

    // Unapply distortion and intrinsic camera transformations
    Mat eye_camera_mat = Mat::eye(3, 3, CV_32F);
    Mat empty_dist_coef;
    Mat image_normalized;
    undistortPoints(image, image_normalized, camera_mat, dist_coef, Mat(), eye_camera_mat);

    // Hypotheses storage (global)
    Mat rot_matrices(1, num_iters * 9, CV_32F);
    Mat transl_vectors(1, num_iters * 3, CV_32F);

    // Generate set of hypotheses using small subsets of the input data
    TransformHypothesesGenerator body(object, image_normalized, empty_dist_coef, eye_camera_mat,
                                      num_points, subset_size, rot_matrices, transl_vectors);
    parallel_for_(Range(0, num_iters), body);

    // Compute scores (i.e. number of inliers) for each hypothesis
    GpuMat d_object(object);
    GpuMat d_image_normalized(image_normalized);
    GpuMat d_hypothesis_scores(1, num_iters, CV_32S);
    solve_pnp_ransac::computeHypothesisScores(
            num_iters, num_points, rot_matrices.ptr<float>(), transl_vectors.ptr<float3>(),
            d_object.ptr<float3>(), d_image_normalized.ptr<float2>(), max_dist * max_dist,
            d_hypothesis_scores.ptr<int>());

    // Find the best hypothesis index
    Point best_idx;
    double best_score;
    cuda::minMaxLoc(d_hypothesis_scores, NULL, &best_score, NULL, &best_idx);
    int num_inliers = static_cast<int>(best_score);

    // Extract the best hypothesis data

    Mat rot_mat = rot_matrices.colRange(best_idx.x * 9, (best_idx.x + 1) * 9).reshape(0, 3);
    Rodrigues(rot_mat, rvec);
    rvec = rvec.reshape(0, 1);

    tvec = transl_vectors.colRange(best_idx.x * 3, (best_idx.x + 1) * 3).clone();
    tvec = tvec.reshape(0, 1);

    // Build vector of inlier indices
    if (inliers != NULL)
    {
        inliers->clear();
        inliers->reserve(num_inliers);

        Point3f p, p_transf;
        Point2f p_proj;
        const float* rot = rot_mat.ptr<float>();
        const float* transl = tvec.ptr<float>();

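        // Note: distances here (and in the GPU scoring above) are measured in normalized
        // image coordinates, because the image points were undistorted with an identity
        // camera matrix.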
        for (int i = 0; i < num_points; ++i)
        {
            p = object.at<Point3f>(0, i);
            p_transf.x = rot[0] * p.x + rot[1] * p.y + rot[2] * p.z + transl[0];
            p_transf.y = rot[3] * p.x + rot[4] * p.y + rot[5] * p.z + transl[1];
            p_transf.z = rot[6] * p.x + rot[7] * p.y + rot[8] * p.z + transl[2];
            p_proj.x = p_transf.x / p_transf.z;
            p_proj.y = p_transf.y / p_transf.z;
            if (norm(p_proj - image_normalized.at<Point2f>(0, i)) < max_dist)
                inliers->push_back(i);
        }
    }
}

#endif