/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

namespace cv {

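// Build a Stitcher configured with the default pipeline: SURF features (ORB when the
// SURF implementation is not built in), best-of-2-nearest matching, ray-space bundle
// adjustment, spherical warping, graph-cut seam estimation, block gain compensation
// and multi-band blending. CUDA variants are selected when try_use_gpu is set and a
// CUDA-capable device is available.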
Stitcher Stitcher::createDefault(bool try_use_gpu)
{
    Stitcher stitcher;
    stitcher.setRegistrationResol(0.6);
    stitcher.setSeamEstimationResol(0.1);
    stitcher.setCompositingResol(ORIG_RESOL);
    stitcher.setPanoConfidenceThresh(1);
    stitcher.setWaveCorrection(true);
    stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
    stitcher.setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(try_use_gpu));
    stitcher.setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());

#ifdef HAVE_CUDA
    if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0)
    {
#ifdef HAVE_OPENCV_XFEATURES2D
        stitcher.setFeaturesFinder(makePtr<detail::SurfFeaturesFinderGpu>());
#else
        stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
        stitcher.setWarper(makePtr<SphericalWarperGpu>());
        stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinderGpu>());
    }
    else
#endif
    {
#ifdef HAVE_OPENCV_XFEATURES2D
        stitcher.setFeaturesFinder(makePtr<detail::SurfFeaturesFinder>());
#else
        stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
        stitcher.setWarper(makePtr<SphericalWarper>());
        stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
    }

    stitcher.setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
    stitcher.setBlender(makePtr<detail::MultiBandBlender>(try_use_gpu));

    return stitcher;
}

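// Register the input images: detect and match features, then estimate camera
// parameters. The results are stored in the Stitcher and reused by composePanorama().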
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images)
{
    return estimateTransform(images, std::vector<std::vector<Rect> >());
}

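// Overload that restricts feature detection to the given regions of interest
// (one vector of rectangles per input image, given in full-resolution image coordinates).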
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois)
{
    images.getUMatVector(imgs_);
    rois_ = rois;

    Status status;

    if ((status = matchImages()) != OK)
        return status;

    if ((status = estimateCameraParams()) != OK)
        return status;

    return OK;
}


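// Compose the panorama from the images that were registered by estimateTransform().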
Stitcher::Status Stitcher::composePanorama(OutputArray pano)
{
    return composePanorama(std::vector<UMat>(), pano);
}

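// Compose the panorama. If a non-empty image list is given, it replaces the images
// registered by estimateTransform() (the count must match); each image is then
// warped, exposure compensated, masked by the estimated seam and blended.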
Stitcher::Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArray pano)
{
    LOGLN("Warping images (auxiliary)... ");

    std::vector<UMat> imgs;
    images.getUMatVector(imgs);
    if (!imgs.empty())
    {
        CV_Assert(imgs.size() == imgs_.size());

        UMat img;
        seam_est_imgs_.resize(imgs.size());

        for (size_t i = 0; i < imgs.size(); ++i)
        {
            imgs_[i] = imgs[i];
            resize(imgs[i], img, Size(), seam_scale_, seam_scale_);
            seam_est_imgs_[i] = img.clone();
        }

        std::vector<UMat> seam_est_imgs_subset;
        std::vector<UMat> imgs_subset;

        for (size_t i = 0; i < indices_.size(); ++i)
        {
            imgs_subset.push_back(imgs_[indices_[i]]);
            seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        }

        seam_est_imgs_ = seam_est_imgs_subset;
        imgs_ = imgs_subset;
    }

    UMat pano_;

#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    std::vector<Point> corners(imgs_.size());
    std::vector<UMat> masks_warped(imgs_.size());
    std::vector<UMat> images_warped(imgs_.size());
    std::vector<Size> sizes(imgs_.size());
    std::vector<UMat> masks(imgs_.size());

    // Prepare image masks
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        masks[i].create(seam_est_imgs_[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    // Warp images and their masks
    Ptr<detail::RotationWarper> w = warper_->create(float(warped_image_scale_ * seam_work_aspect_));
    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        Mat_<float> K;
        cameras_[i].K().convertTo(K, CV_32F);
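        // Scale the intrinsics from the registration (work) resolution to the
        // seam estimation resolution before warping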
        K(0,0) *= (float)seam_work_aspect_;
        K(0,2) *= (float)seam_work_aspect_;
        K(1,1) *= (float)seam_work_aspect_;
        K(1,2) *= (float)seam_work_aspect_;

        corners[i] = w->warp(seam_est_imgs_[i], K, cameras_[i].R, INTER_LINEAR, BORDER_CONSTANT, images_warped[i]);
        sizes[i] = images_warped[i].size();

        w->warp(masks[i], K, cameras_[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }

    std::vector<UMat> images_warped_f(imgs_.size());
    for (size_t i = 0; i < imgs_.size(); ++i)
        images_warped[i].convertTo(images_warped_f[i], CV_32F);

    LOGLN("Warping images, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Compensate exposure and find seam masks
    exposure_comp_->feed(corners, images_warped, masks_warped);
    seam_finder_->find(images_warped_f, corners, masks_warped);

    // Release unused memory
    seam_est_imgs_.clear();
    images_warped.clear();
    images_warped_f.clear();
    masks.clear();

    LOGLN("Compositing...");
#if ENABLE_LOG
    t = getTickCount();
#endif

    UMat img_warped, img_warped_s;
    UMat dilated_mask, seam_mask, mask, mask_warped;

    //double compose_seam_aspect = 1;
    double compose_work_aspect = 1;
    bool is_blender_prepared = false;

    double compose_scale = 1;
    bool is_compose_scale_set = false;

    UMat full_img, img;
    for (size_t img_idx = 0; img_idx < imgs_.size(); ++img_idx)
    {
        LOGLN("Compositing image #" << indices_[img_idx] + 1);
#if ENABLE_LOG
        int64 compositing_t = getTickCount();
#endif

        // Read image and resize it if necessary
        full_img = imgs_[img_idx];
        if (!is_compose_scale_set)
        {
            if (compose_resol_ > 0)
                compose_scale = std::min(1.0, std::sqrt(compose_resol_ * 1e6 / full_img.size().area()));
            is_compose_scale_set = true;

            // Compute relative scales
            //compose_seam_aspect = compose_scale / seam_scale_;
            compose_work_aspect = compose_scale / work_scale_;

            // Update warped image scale
            warped_image_scale_ *= static_cast<float>(compose_work_aspect);
            w = warper_->create((float)warped_image_scale_);

            // Update corners and sizes
            for (size_t i = 0; i < imgs_.size(); ++i)
            {
                // Update intrinsics
                cameras_[i].focal *= compose_work_aspect;
                cameras_[i].ppx *= compose_work_aspect;
                cameras_[i].ppy *= compose_work_aspect;

                // Update corner and size
                Size sz = full_img_sizes_[i];
                if (std::abs(compose_scale - 1) > 1e-1)
                {
                    sz.width = cvRound(full_img_sizes_[i].width * compose_scale);
                    sz.height = cvRound(full_img_sizes_[i].height * compose_scale);
                }

                Mat K;
                cameras_[i].K().convertTo(K, CV_32F);
                Rect roi = w->warpRoi(sz, K, cameras_[i].R);
                corners[i] = roi.tl();
                sizes[i] = roi.size();
            }
        }
        if (std::abs(compose_scale - 1) > 1e-1)
        {
#if ENABLE_LOG
            int64 resize_t = getTickCount();
#endif
            resize(full_img, img, Size(), compose_scale, compose_scale);
            LOGLN(" resize time: " << ((getTickCount() - resize_t) / getTickFrequency()) << " sec");
        }
        else
            img = full_img;
        full_img.release();
        Size img_size = img.size();

        LOGLN(" after resize time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");

        Mat K;
        cameras_[img_idx].K().convertTo(K, CV_32F);

#if ENABLE_LOG
        int64 pt = getTickCount();
#endif
        // Warp the current image
        w->warp(img, K, cameras_[img_idx].R, INTER_LINEAR, BORDER_CONSTANT, img_warped);
        LOGLN(" warp the current image: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
        pt = getTickCount();
#endif

        // Warp the current image mask
        mask.create(img_size, CV_8U);
        mask.setTo(Scalar::all(255));
        w->warp(mask, K, cameras_[img_idx].R, INTER_NEAREST, BORDER_CONSTANT, mask_warped);
        LOGLN(" warp the current image mask: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
        pt = getTickCount();
#endif

        // Compensate exposure
        exposure_comp_->apply((int)img_idx, corners[img_idx], img_warped, mask_warped);
        LOGLN(" compensate exposure: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
        pt = getTickCount();
#endif

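        // The blender consumes CV_16SC3 images, so convert before feeding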
        img_warped.convertTo(img_warped_s, CV_16S);
        img_warped.release();
        img.release();
        mask.release();

        // Make sure seam mask has proper size
        dilate(masks_warped[img_idx], dilated_mask, Mat());
        resize(dilated_mask, seam_mask, mask_warped.size());

        bitwise_and(seam_mask, mask_warped, mask_warped);

        LOGLN(" other: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");
#if ENABLE_LOG
        pt = getTickCount();
#endif

        if (!is_blender_prepared)
        {
            blender_->prepare(corners, sizes);
            is_blender_prepared = true;
        }

        LOGLN(" other2: " << ((getTickCount() - pt) / getTickFrequency()) << " sec");

        LOGLN(" feed...");
#if ENABLE_LOG
        int64 feed_t = getTickCount();
#endif
        // Blend the current image
        blender_->feed(img_warped_s, mask_warped, corners[img_idx]);
        LOGLN(" feed time: " << ((getTickCount() - feed_t) / getTickFrequency()) << " sec");
        LOGLN("Compositing ## time: " << ((getTickCount() - compositing_t) / getTickFrequency()) << " sec");
    }

#if ENABLE_LOG
    int64 blend_t = getTickCount();
#endif
    UMat result, result_mask;
    blender_->blend(result, result_mask);
    LOGLN("blend time: " << ((getTickCount() - blend_t) / getTickFrequency()) << " sec");

    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // The preliminary result is in CV_16SC3 format, but all values fit in [0, 255],
    // so convert it to CV_8U to avoid confusing the user
    result.convertTo(pano, CV_8U);

    return OK;
}

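// Convenience wrapper: run registration and compositing in one call.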
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
{
    Status status = estimateTransform(images);
    if (status != OK)
        return status;
    return composePanorama(pano);
}

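// Convenience wrapper that restricts feature detection to the given regions of interest.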
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano)
{
    Status status = estimateTransform(images, rois);
    if (status != OK)
        return status;
    return composePanorama(pano);
}

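// Find features in each input image at the registration resolution, match them
// pairwise and keep only the largest subset of images that confidently belongs to
// one panorama.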
Stitcher::Status Stitcher::matchImages()
{
    if ((int)imgs_.size() < 2)
    {
        LOGLN("Need more images");
        return ERR_NEED_MORE_IMGS;
    }

    work_scale_ = 1;
    seam_work_aspect_ = 1;
    seam_scale_ = 1;
    bool is_work_scale_set = false;
    bool is_seam_scale_set = false;
    UMat full_img, img;
    features_.resize(imgs_.size());
    seam_est_imgs_.resize(imgs_.size());
    full_img_sizes_.resize(imgs_.size());

    LOGLN("Finding features...");
#if ENABLE_LOG
    int64 t = getTickCount();
#endif

    for (size_t i = 0; i < imgs_.size(); ++i)
    {
        full_img = imgs_[i];
        full_img_sizes_[i] = full_img.size();

        if (registr_resol_ < 0)
        {
            img = full_img;
            work_scale_ = 1;
            is_work_scale_set = true;
        }
        else
        {
            if (!is_work_scale_set)
            {
                work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img.size().area()));
                is_work_scale_set = true;
            }
            resize(full_img, img, Size(), work_scale_, work_scale_);
        }
        if (!is_seam_scale_set)
        {
            seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img.size().area()));
            seam_work_aspect_ = seam_scale_ / work_scale_;
            is_seam_scale_set = true;
        }

        if (rois_.empty())
            (*features_finder_)(img, features_[i]);
        else
        {
            std::vector<Rect> rois(rois_[i].size());
            for (size_t j = 0; j < rois_[i].size(); ++j)
            {
                Point tl(cvRound(rois_[i][j].x * work_scale_), cvRound(rois_[i][j].y * work_scale_));
                Point br(cvRound(rois_[i][j].br().x * work_scale_), cvRound(rois_[i][j].br().y * work_scale_));
                rois[j] = Rect(tl, br);
            }
            (*features_finder_)(img, features_[i], rois);
        }
        features_[i].img_idx = (int)i;
        LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());

        resize(full_img, img, Size(), seam_scale_, seam_scale_);
        seam_est_imgs_[i] = img.clone();
    }

    // Release internal feature finder buffers to save memory
    features_finder_->collectGarbage();
    full_img.release();
    img.release();

    LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    LOG("Pairwise matching");
#if ENABLE_LOG
    t = getTickCount();
#endif
    (*features_matcher_)(features_, pairwise_matches_, matching_mask_);
    features_matcher_->collectGarbage();
    LOGLN("Pairwise matching, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");

    // Leave only images we are sure are from the same panorama
    indices_ = detail::leaveBiggestComponent(features_, pairwise_matches_, (float)conf_thresh_);
    std::vector<UMat> seam_est_imgs_subset;
    std::vector<UMat> imgs_subset;
    std::vector<Size> full_img_sizes_subset;
    for (size_t i = 0; i < indices_.size(); ++i)
    {
        imgs_subset.push_back(imgs_[indices_[i]]);
        seam_est_imgs_subset.push_back(seam_est_imgs_[indices_[i]]);
        full_img_sizes_subset.push_back(full_img_sizes_[indices_[i]]);
    }
    seam_est_imgs_ = seam_est_imgs_subset;
    imgs_ = imgs_subset;
    full_img_sizes_ = full_img_sizes_subset;

    if ((int)imgs_.size() < 2)
    {
        LOGLN("Need more images");
        return ERR_NEED_MORE_IMGS;
    }

    return OK;
}

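// Estimate camera rotations and focals from the pairwise matches, refine them with
// bundle adjustment, take the median focal length as the warping scale and optionally
// apply wave correction to straighten the panorama.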
Stitcher::Status Stitcher::estimateCameraParams()
{
    detail::HomographyBasedEstimator estimator;
    if (!estimator(features_, pairwise_matches_, cameras_))
        return ERR_HOMOGRAPHY_EST_FAIL;

    for (size_t i = 0; i < cameras_.size(); ++i)
    {
        Mat R;
        cameras_[i].R.convertTo(R, CV_32F);
        cameras_[i].R = R;
        //LOGLN("Initial intrinsic parameters #" << indices_[i] + 1 << ":\n " << cameras_[i].K());
    }

    bundle_adjuster_->setConfThresh(conf_thresh_);
    if (!(*bundle_adjuster_)(features_, pairwise_matches_, cameras_))
        return ERR_CAMERA_PARAMS_ADJUST_FAIL;

    // Find median focal length and use it as final image scale
    std::vector<double> focals;
    for (size_t i = 0; i < cameras_.size(); ++i)
    {
        //LOGLN("Camera #" << indices_[i] + 1 << ":\n" << cameras_[i].K());
        focals.push_back(cameras_[i].focal);
    }

    std::sort(focals.begin(), focals.end());
    if (focals.size() % 2 == 1)
        warped_image_scale_ = static_cast<float>(focals[focals.size() / 2]);
    else
        warped_image_scale_ = static_cast<float>(focals[focals.size() / 2 - 1] + focals[focals.size() / 2]) * 0.5f;

    if (do_wave_correct_)
    {
        std::vector<Mat> rmats;
        for (size_t i = 0; i < cameras_.size(); ++i)
            rmats.push_back(cameras_[i].R.clone());
        detail::waveCorrect(rmats, wave_correct_kind_);
        for (size_t i = 0; i < cameras_.size(); ++i)
            cameras_[i].R = rmats[i];
    }

    return OK;
}

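// Free-function factory mirroring Stitcher::createDefault(); note that the SURF
// finder here is guarded by HAVE_OPENCV_NONFREE rather than HAVE_OPENCV_XFEATURES2D.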
Ptr<Stitcher> createStitcher(bool try_use_gpu)
{
    Ptr<Stitcher> stitcher = makePtr<Stitcher>();
    stitcher->setRegistrationResol(0.6);
    stitcher->setSeamEstimationResol(0.1);
    stitcher->setCompositingResol(Stitcher::ORIG_RESOL);
    stitcher->setPanoConfidenceThresh(1);
    stitcher->setWaveCorrection(true);
    stitcher->setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
    stitcher->setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(try_use_gpu));
    stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());

#ifdef HAVE_CUDA
    if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0)
    {
#ifdef HAVE_OPENCV_NONFREE
        stitcher->setFeaturesFinder(makePtr<detail::SurfFeaturesFinderGpu>());
#else
        stitcher->setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
        stitcher->setWarper(makePtr<SphericalWarperGpu>());
        stitcher->setSeamFinder(makePtr<detail::GraphCutSeamFinderGpu>());
    }
    else
#endif
    {
#ifdef HAVE_OPENCV_NONFREE
        stitcher->setFeaturesFinder(makePtr<detail::SurfFeaturesFinder>());
#else
        stitcher->setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
        stitcher->setWarper(makePtr<SphericalWarper>());
        stitcher->setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
    }

    stitcher->setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
    stitcher->setBlender(makePtr<detail::MultiBandBlender>(try_use_gpu));

    return stitcher;
}
} // namespace cv