/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_FEATURES_2D_HPP__
#define __OPENCV_FEATURES_2D_HPP__

#include "opencv2/core.hpp"
#include "opencv2/flann/miniflann.hpp"

/**
  @defgroup features2d 2D Features Framework
  @{
    @defgroup features2d_main Feature Detection and Description
    @defgroup features2d_match Descriptor Matchers

Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to
easily switch between different algorithms solving the same problem. This section is devoted to
matching descriptors that are represented as vectors in a multidimensional space. All objects that
implement vector descriptor matchers inherit the DescriptorMatcher interface.
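
A minimal matching sketch (descriptors1 and descriptors2 are assumed to have been computed
beforehand by the same Feature2D instance; any DescriptorMatcher implementation can be
substituted, since they share the interface):
@code{.cpp}
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce");
    std::vector<cv::DMatch> matches;
    matcher->match(descriptors1, descriptors2, matches);
@endcode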

@note
   - An example explaining keypoint matching can be found at
     opencv_source_code/samples/cpp/descriptor_extractor_matcher.cpp
   - An example on descriptor matching evaluation can be found at
     opencv_source_code/samples/cpp/detector_descriptor_matcher_evaluation.cpp
   - An example on one to many image matching can be found at
     opencv_source_code/samples/cpp/matching_to_many_images.cpp

    @defgroup features2d_draw Drawing Functions of Keypoints and Matches
    @defgroup features2d_category Object Categorization

This section describes approaches based on local 2D features and used to categorize objects.

@note
   - A complete Bag-Of-Words sample can be found at
     opencv_source_code/samples/cpp/bagofwords_classification.cpp
   - (Python) An example using the features2D framework to perform object categorization can be
     found at opencv_source_code/samples/python2/find_obj.py

  @}
 */

namespace cv
{

//! @addtogroup features2d
//! @{

// //! writes vector of keypoints to the file storage
// CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
// //! reads vector of keypoints from the specified file storage node
// CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);

/** @brief A class to filter a vector of keypoints.

Because it is currently difficult to provide a convenient interface for all usage scenarios of
keypoint filtering, the class has only the static methods that are needed for now.
 */
class CV_EXPORTS KeyPointsFilter
{
public:
    KeyPointsFilter(){}

    /*
     * Remove keypoints within borderPixels of an image edge.
     */
    static void runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize );
    /*
     * Remove keypoints of sizes out of range.
     */
    static void runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize,
                                   float maxSize=FLT_MAX );
    /*
     * Remove keypoints from some image by mask for pixels of this image.
     */
    static void runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask );
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated( std::vector<KeyPoint>& keypoints );

    /*
     * Retain the specified number of the best keypoints (according to the response)
     */
    static void retainBest( std::vector<KeyPoint>& keypoints, int npoints );
};


/************************************ Base Classes ************************************/

/** @brief Abstract base class for 2D image feature detectors and descriptor extractors
*/
class CV_EXPORTS_W Feature2D : public virtual Algorithm
{
public:
    virtual ~Feature2D();

    /** @brief Detects keypoints in an image (first variant) or image set (second variant).

    @param image Image.
    @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
    of keypoints detected in images[i] .
    @param mask Mask specifying where to look for keypoints (optional). It must be an 8-bit integer
    matrix with non-zero values in the region of interest.
     */
    CV_WRAP virtual void detect( InputArray image,
                                 CV_OUT std::vector<KeyPoint>& keypoints,
                                 InputArray mask=noArray() );

    /** @overload
    @param images Image set.
    @param keypoints The detected keypoints.
    In the second variant of the method keypoints[i] is a set
    of keypoints detected in images[i] .
    @param masks Masks for each input image specifying where to look for keypoints (optional).
    masks[i] is a mask for images[i].
    */
    virtual void detect( InputArrayOfArrays images,
                         std::vector<std::vector<KeyPoint> >& keypoints,
                         InputArrayOfArrays masks=noArray() );

    /** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set
    (second variant).

    @param image Image.
    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint
    with several dominant orientations (one for each orientation).
    @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
    the descriptors computed for keypoints[i]. Row j in descriptors (or descriptors[i]) is the
    descriptor for the j-th keypoint.
     */
    CV_WRAP virtual void compute( InputArray image,
                                  CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
                                  OutputArray descriptors );

    /** @overload

    @param images Image set.
    @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
    computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates a keypoint
    with several dominant orientations (one for each orientation).
    @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
    the descriptors computed for keypoints[i]. Row j in descriptors (or descriptors[i]) is the
    descriptor for the j-th keypoint.
    */
    virtual void compute( InputArrayOfArrays images,
                          std::vector<std::vector<KeyPoint> >& keypoints,
                          OutputArrayOfArrays descriptors );

    /** Detects keypoints and computes the descriptors */
    CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
                                           CV_OUT std::vector<KeyPoint>& keypoints,
                                           OutputArray descriptors,
                                           bool useProvidedKeypoints=false );

    CV_WRAP virtual int descriptorSize() const;
    CV_WRAP virtual int descriptorType() const;
    CV_WRAP virtual int defaultNorm() const;

    //! Return true if detector object is empty
    CV_WRAP virtual bool empty() const;
};

/** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch
between different algorithms solving the same problem. All objects that implement keypoint detectors
inherit the FeatureDetector interface. */
typedef Feature2D FeatureDetector;

/** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you
to easily switch between different algorithms solving the same problem. This section is devoted to
computing descriptors represented as vectors in a multidimensional space. All objects that implement
the vector descriptor extractors inherit the DescriptorExtractor interface.
*/
typedef Feature2D DescriptorExtractor;

//! @addtogroup features2d_main
//! @{

/** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .
 */
class CV_EXPORTS_W BRISK : public Feature2D
{
public:
    /** @brief The BRISK constructor

    @param thresh AGAST detection threshold score.
    @param octaves detection octaves. Use 0 to do single scale.
    @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a
    keypoint.
     */
    CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);

    /** @brief The BRISK constructor for a custom pattern

    @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
    keypoint scale 1).
    @param numberList defines the number of sampling points on the sampling circle. Must be the same
    size as radiusList.
    @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
    scale 1).
    @param dMin threshold for the long pairings used for orientation determination (in pixels for
    keypoint scale 1).
    @param indexChange index remapping of the bits. */
    CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
        float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());
};

/** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor

described in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects
the strongest features using FAST or Harris response, finds their orientation using first-order
moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or
k-tuples) are rotated according to the measured orientation).
 */
class CV_EXPORTS_W ORB : public Feature2D
{
public:
    enum { kBytes = 32, HARRIS_SCORE=0, FAST_SCORE=1 };

    /** @brief The ORB constructor

    @param nfeatures The maximum number of features to retain.
    @param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical
    pyramid, where each next level has 4x fewer pixels than the previous, but such a big scale factor
    will degrade feature matching scores dramatically. On the other hand, a scale factor too close to 1
    will mean that to cover a certain scale range you will need more pyramid levels and so the speed
    will suffer.
    @param nlevels The number of pyramid levels. The smallest level will have linear size equal to
    input_image_linear_size/pow(scaleFactor, nlevels).
    @param edgeThreshold This is the size of the border where the features are not detected. It should
    roughly match the patchSize parameter.
    @param firstLevel It should be 0 in the current implementation.
    @param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The
    default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,
    so we get a 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3
    random points (of course, those point coordinates are random, but they are generated from the
    pre-defined seed, so each element of the BRIEF descriptor is computed deterministically from the
    pixel rectangle), find the point of maximum brightness and output the index of the winner (0, 1 or
    2). Such output will occupy 2 bits, and therefore it will need a special variant of Hamming
    distance, denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to
    compute each bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
    @param scoreType The default HARRIS_SCORE means that the Harris algorithm is used to rank features
    (the score is written to KeyPoint::response and is used to retain the best nfeatures features);
    FAST_SCORE is an alternative value of the parameter that produces slightly less stable keypoints,
    but it is a little faster to compute.
    @param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller
    pyramid layers the perceived image area covered by a feature will be larger.
    @param fastThreshold Threshold for the FAST detector used to find keypoints.
     */
    CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,
        int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);

    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;
    CV_WRAP virtual double getScaleFactor() const = 0;

    CV_WRAP virtual void setNLevels(int nlevels) = 0;
    CV_WRAP virtual int getNLevels() const = 0;

    CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0;
    CV_WRAP virtual int getEdgeThreshold() const = 0;

    CV_WRAP virtual void setFirstLevel(int firstLevel) = 0;
    CV_WRAP virtual int getFirstLevel() const = 0;

    CV_WRAP virtual void setWTA_K(int wta_k) = 0;
    CV_WRAP virtual int getWTA_K() const = 0;

    CV_WRAP virtual void setScoreType(int scoreType) = 0;
    CV_WRAP virtual int getScoreType() const = 0;

    CV_WRAP virtual void setPatchSize(int patchSize) = 0;
    CV_WRAP virtual int getPatchSize() const = 0;

    CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0;
    CV_WRAP virtual int getFastThreshold() const = 0;
};

/** @brief Maximally stable extremal region extractor.

The class encapsulates all the parameters of the MSER extraction algorithm (see
<http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions>). Also see
<http://code.opencv.org/projects/opencv/wiki/MSER> for useful comments and parameters description.

@note
   - (Python) A complete example showing the use of the MSER detector can be found at
     opencv_source_code/samples/python2/mser.py
 */
class CV_EXPORTS_W MSER : public Feature2D
{
public:
    //! the full constructor
    CV_WRAP static Ptr<MSER> create( int _delta=5, int _min_area=60, int _max_area=14400,
          double _max_variation=0.25, double _min_diversity=.2,
          int _max_evolution=200, double _area_threshold=1.01,
          double _min_margin=0.003, int _edge_blur_size=5 );

    CV_WRAP virtual void detectRegions( InputArray image,
                                        CV_OUT std::vector<std::vector<Point> >& msers,
                                        std::vector<Rect>& bboxes ) = 0;

    CV_WRAP virtual void setDelta(int delta) = 0;
    CV_WRAP virtual int getDelta() const = 0;

    CV_WRAP virtual void setMinArea(int minArea) = 0;
    CV_WRAP virtual int getMinArea() const = 0;

    CV_WRAP virtual void setMaxArea(int maxArea) = 0;
    CV_WRAP virtual int getMaxArea() const = 0;

    CV_WRAP virtual void setPass2Only(bool f) = 0;
    CV_WRAP virtual bool getPass2Only() const = 0;
};
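
/* A minimal MSER sketch ("img" is assumed to be a loaded 8-bit grayscale cv::Mat):

    cv::Ptr<cv::MSER> mser = cv::MSER::create();
    std::vector<std::vector<cv::Point> > regions;
    std::vector<cv::Rect> bboxes;
    mser->detectRegions(img, regions, bboxes);  // one point list and one bounding box per region
*/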

/** @overload */
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                      int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the FAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the three neighborhoods as defined in the paper:
FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
FastFeatureDetector::TYPE_5_8

Detects corners using the FAST algorithm by @cite Rosten06 .

@note In the Python API, types are given as cv2.FAST_FEATURE_DETECTOR_TYPE_5_8,
cv2.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv2.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
detection, use the cv2.FAST.detect() method.
 */
CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                      int threshold, bool nonmaxSuppression, int type );

//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Wrapping class for feature detection using the FAST method.
 */
class CV_EXPORTS_W FastFeatureDetector : public Feature2D
{
public:
    enum
    {
        TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2,
        THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002,
    };

    CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
                                                    bool nonmaxSuppression=true,
                                                    int type=FastFeatureDetector::TYPE_9_16 );

    CV_WRAP virtual void setThreshold(int threshold) = 0;
    CV_WRAP virtual int getThreshold() const = 0;

    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
    CV_WRAP virtual bool getNonmaxSuppression() const = 0;

    CV_WRAP virtual void setType(int type) = 0;
    CV_WRAP virtual int getType() const = 0;
};

/** @overload */
CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the AGAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the four neighborhoods as defined in the paper:
AgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d,
AgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16

Detects corners using the AGAST algorithm by @cite mair2010_agast .
 */
CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                       int threshold, bool nonmaxSuppression, int type );
//! @} features2d_main
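
/* A minimal corner-detection sketch ("img" is assumed to be an 8-bit grayscale cv::Mat):

    std::vector<cv::KeyPoint> corners;
    cv::FAST(img, corners, 40);    // free function; TYPE_9_16 neighborhood by default
    // or through the wrapper class, which plugs into the common Feature2D interface:
    cv::Ptr<cv::FastFeatureDetector> fast = cv::FastFeatureDetector::create(40);
    fast->detect(img, corners);
*/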

//! @addtogroup features2d_main
//! @{

/** @brief Wrapping class for feature detection using the AGAST method.
 */
class CV_EXPORTS_W AgastFeatureDetector : public Feature2D
{
public:
    enum
    {
        AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,
        THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,
    };

    CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,
                                                     bool nonmaxSuppression=true,
                                                     int type=AgastFeatureDetector::OAST_9_16 );

    CV_WRAP virtual void setThreshold(int threshold) = 0;
    CV_WRAP virtual int getThreshold() const = 0;

    CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
    CV_WRAP virtual bool getNonmaxSuppression() const = 0;

    CV_WRAP virtual void setType(int type) = 0;
    CV_WRAP virtual int getType() const = 0;
};

/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function.
 */
class CV_EXPORTS_W GFTTDetector : public Feature2D
{
public:
    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
                                             int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setQualityLevel(double qlevel) = 0;
    CV_WRAP virtual double getQualityLevel() const = 0;

    CV_WRAP virtual void setMinDistance(double minDistance) = 0;
    CV_WRAP virtual double getMinDistance() const = 0;

    CV_WRAP virtual void setBlockSize(int blockSize) = 0;
    CV_WRAP virtual int getBlockSize() const = 0;

    CV_WRAP virtual void setHarrisDetector(bool val) = 0;
    CV_WRAP virtual bool getHarrisDetector() const = 0;

    CV_WRAP virtual void setK(double k) = 0;
    CV_WRAP virtual double getK() const = 0;
};

/** @brief Class for extracting blobs from an image.

The class implements a simple algorithm for extracting blobs from an image:

1.  Convert the source image to binary images by applying thresholding with several thresholds from
    minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between
    neighboring thresholds.
2.  Extract connected components from every binary image by findContours and calculate their
    centers.
3.  Group centers from several binary images by their coordinates. Close centers form one group that
    corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.
4.  From the groups, estimate final centers of blobs and their radii and return them as locations and
    sizes of keypoints.

This class performs several filtering steps on the returned blobs. You should set filterBy\* to
true/false to turn on/off the corresponding filter. Available filters:

-   **By color**. This filter compares the intensity of a binary image at the center of a blob to
    blobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs
    and blobColor = 255 to extract light blobs.
-   **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).
-   **By circularity**. Extracted blobs have circularity
    (\f$\frac{4*\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and
    maxCircularity (exclusive).
-   **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio
    between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).
-   **By convexity**.
    Extracted blobs have convexity (area / area of blob convex hull) between
    minConvexity (inclusive) and maxConvexity (exclusive).

Default values of parameters are tuned to extract dark circular blobs.
 */
class CV_EXPORTS_W SimpleBlobDetector : public Feature2D
{
public:
  struct CV_EXPORTS_W_SIMPLE Params
  {
      CV_WRAP Params();
      CV_PROP_RW float thresholdStep;
      CV_PROP_RW float minThreshold;
      CV_PROP_RW float maxThreshold;
      CV_PROP_RW size_t minRepeatability;
      CV_PROP_RW float minDistBetweenBlobs;

      CV_PROP_RW bool filterByColor;
      CV_PROP_RW uchar blobColor;

      CV_PROP_RW bool filterByArea;
      CV_PROP_RW float minArea, maxArea;

      CV_PROP_RW bool filterByCircularity;
      CV_PROP_RW float minCircularity, maxCircularity;

      CV_PROP_RW bool filterByInertia;
      CV_PROP_RW float minInertiaRatio, maxInertiaRatio;

      CV_PROP_RW bool filterByConvexity;
      CV_PROP_RW float minConvexity, maxConvexity;

      void read( const FileNode& fn );
      void write( FileStorage& fs ) const;
  };

  CV_WRAP static Ptr<SimpleBlobDetector>
    create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
};

//! @} features2d_main

//! @addtogroup features2d_main
//! @{

/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .

@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints.

[ABD12] KAZE Features. Pablo F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European
Conference on Computer Vision (ECCV), Florence, Italy, October 2012.
 */
class CV_EXPORTS_W KAZE : public Feature2D
{
public:
    enum
    {
        DIFF_PM_G1 = 0,
        DIFF_PM_G2 = 1,
        DIFF_WEICKERT = 2,
        DIFF_CHARBONNIER = 3
    };

    /** @brief The KAZE constructor

    @param extended Set to enable extraction of the extended (128-element) descriptor.
    @param upright Set to enable use of upright descriptors (non rotation-invariant).
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
     */
    CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
                                    float threshold = 0.001f,
                                    int nOctaves = 4, int nOctaveLayers = 4,
                                    int diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setExtended(bool extended) = 0;
    CV_WRAP virtual bool getExtended() const = 0;

    CV_WRAP virtual void setUpright(bool upright) = 0;
    CV_WRAP virtual bool getUpright() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(int diff) = 0;
    CV_WRAP virtual int getDiffusivity() const = 0;
};
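
/* A minimal KAZE sketch ("img" is assumed to be a loaded grayscale cv::Mat):

    cv::Ptr<cv::KAZE> kaze = cv::KAZE::create();
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;
    kaze->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
*/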

/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13 .

@note AKAZE descriptors can only be used with KAZE or AKAZE keypoints. Try to avoid using *extract*
and *detect* instead of *operator()* due to performance reasons.

[ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear Scale Spaces. Pablo F.
Alcantarilla, Jesús Nuevo and Adrien Bartoli. In British Machine Vision Conference (BMVC), Bristol,
UK, September 2013.
 */
class CV_EXPORTS_W AKAZE : public Feature2D
{
public:
    // AKAZE descriptor type
    enum
    {
        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_KAZE = 3,
        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_MLDB = 5
    };

    /** @brief The AKAZE constructor

    @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
    DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
    @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
    @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
     */
    CV_WRAP static Ptr<AKAZE> create(int descriptor_type=AKAZE::DESCRIPTOR_MLDB,
                                     int descriptor_size = 0, int descriptor_channels = 3,
                                     float threshold = 0.001f, int nOctaves = 4,
                                     int nOctaveLayers = 4, int diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setDescriptorType(int dtype) = 0;
    CV_WRAP virtual int getDescriptorType() const = 0;

    CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
    CV_WRAP virtual int getDescriptorSize() const = 0;

    CV_WRAP virtual void setDescriptorChannels(int dch) = 0;
    CV_WRAP virtual int getDescriptorChannels() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(int diff) = 0;
    CV_WRAP virtual int getDiffusivity() const = 0;
};
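
/* AKAZE detection, description and Hamming matching, sketched ("img1" and "img2" are assumed
   to be grayscale inputs):

    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
    std::vector<cv::KeyPoint> k1, k2;
    cv::Mat d1, d2;
    akaze->detectAndCompute(img1, cv::noArray(), k1, d1);
    akaze->detectAndCompute(img2, cv::noArray(), k2, d2);
    cv::BFMatcher matcher(cv::NORM_HAMMING);  // the default MLDB descriptor is binary
    std::vector<cv::DMatch> matches;
    matcher.match(d1, d2, matches);
*/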

//! @} features2d_main

/****************************************************************************************\
*                                     Distance                                            *
\****************************************************************************************/

template<typename T>
struct CV_EXPORTS Accumulator
{
    typedef T Type;
};

template<> struct Accumulator<unsigned char>  { typedef float Type; };
template<> struct Accumulator<unsigned short> { typedef float Type; };
template<> struct Accumulator<char>   { typedef float Type; };
template<> struct Accumulator<short>  { typedef float Type; };

/*
 * Squared Euclidean distance functor
 */
template<class T>
struct CV_EXPORTS SL2
{
    enum { normType = NORM_L2SQR };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL2Sqr<ValueType, ResultType>(a, b, size);
    }
};

/*
 * Euclidean distance functor
 */
template<class T>
struct CV_EXPORTS L2
{
    enum { normType = NORM_L2 };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
    }
};

/*
 * Manhattan distance (city block distance) functor
 */
template<class T>
struct CV_EXPORTS L1
{
    enum { normType = NORM_L1 };
    typedef T ValueType;
    typedef typename Accumulator<T>::Type ResultType;

    ResultType operator()( const T* a, const T* b, int size ) const
    {
        return normL1<ValueType, ResultType>(a, b, size);
    }
};

/****************************************************************************************\
*                                  DescriptorMatcher                                      *
\****************************************************************************************/

//! @addtogroup features2d_match
//! @{

/** @brief Abstract base class for matching keypoint descriptors.

It has two groups of match methods: for matching descriptors of an image with another image or with
an image set.
 */
class CV_EXPORTS_W DescriptorMatcher : public Algorithm
{
public:
    virtual ~DescriptorMatcher();

    /** @brief Adds descriptors to train a descriptor collection on the CPU (trainDescCollection) or
    GPU (utrainDescCollection).

    If the collection is not empty, the new descriptors are added to existing train descriptors.

    @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
    train image.
     */
    CV_WRAP virtual void add( InputArrayOfArrays descriptors );

    /** @brief Returns a constant reference to the train descriptor collection trainDescCollection .
     */
    CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;

    /** @brief Clears the train descriptor collections.
     */
    CV_WRAP virtual void clear();

    /** @brief Returns true if there are no train descriptors in both collections.
     */
    CV_WRAP virtual bool empty() const;

    /** @brief Returns true if the descriptor matcher supports masking permissible matches.
     */
    CV_WRAP virtual bool isMaskSupported() const = 0;

    /** @brief Trains a descriptor matcher

    Trains a descriptor matcher (for example, the flann index).
    In all methods to match, the method train() is run every time before matching. Some descriptor
    matchers (for example, BruteForceMatcher) have an empty implementation of this method. Other
    matchers really train their inner structures (for example, FlannBasedMatcher trains flann::Index ).
     */
    CV_WRAP virtual void train();

    /** @brief Finds the best match for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    In the first variant of this method, the train descriptors are passed as an input argument. In the
    second variant of the method, the train descriptors collection that was set by DescriptorMatcher::add
    is used. An optional mask (or masks) can be passed to specify which query and training descriptors
    can be matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
    mask.at\<uchar\>(i,j) is non-zero.
     */
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
                CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;

    /** @brief Finds the k best matches for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.

    These extended variants of DescriptorMatcher::match methods find several best matches for each query
    descriptor. The matches are returned in order of increasing distance. See DescriptorMatcher::match
    for details about query and train descriptors.
     */
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                   CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                   InputArray mask=noArray(), bool compactResult=false ) const;

    /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Found matches.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in pixels)!
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    For each query descriptor, the methods find such training descriptors that the distance between the
    query descriptor and the training descriptor is equal to or smaller than maxDistance. Found matches
    are returned in order of increasing distance.
     */
    void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                      std::vector<std::vector<DMatch> >& matches, float maxDistance,
                      InputArray mask=noArray(), bool compactResult=false ) const;

    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    */
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        InputArrayOfArrays masks=noArray() );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArrayOfArrays masks=noArray(), bool compactResult=false );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Found matches.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in pixels)!
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
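
    A sketch of matching against the stored train collection (names are illustrative):
    @code{.cpp}
        cv::Ptr<cv::DescriptorMatcher> m = cv::DescriptorMatcher::create("BruteForce-Hamming");
        m->add(trainDescriptorsPerImage);  // std::vector<cv::Mat>, one Mat per train image
        m->train();
        std::vector<std::vector<cv::DMatch> > found;
        m->radiusMatch(queryDescriptors, found, 64.0f);  // train descriptors within distance 64
    @endcode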
    */
    void radiusMatch( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
                      InputArrayOfArrays masks=noArray(), bool compactResult=false );

    // Reads matcher object from a file node
    virtual void read( const FileNode& );
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const;

    /** @brief Clones the matcher.

    @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
    that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
    object copy with the current parameters but with empty train data.
     */
    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;

    /** @brief Creates a descriptor matcher of a given type with the default parameters (using default
    constructor).

    @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are
    supported:
    -   `BruteForce` (it uses L2 )
    -   `BruteForce-L1`
    -   `BruteForce-Hamming`
    -   `BruteForce-Hamming(2)`
    -   `FlannBased`
     */
    CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );
protected:
    /**
     * Class to work with descriptors from several images as with one merged matrix.
     * It is used e.g. in FlannBasedMatcher.
     */
    class CV_EXPORTS DescriptorCollection
    {
    public:
        DescriptorCollection();
        DescriptorCollection( const DescriptorCollection& collection );
        virtual ~DescriptorCollection();

        // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here.
        void set( const std::vector<Mat>& descriptors );
        virtual void clear();

        const Mat& getDescriptors() const;
        const Mat getDescriptor( int imgIdx, int localDescIdx ) const;
        const Mat getDescriptor( int globalDescIdx ) const;
        void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;

        int size() const;

    protected:
        Mat mergedDescriptors;
        std::vector<int> startIdxs;
    };

    //! In fact the matching is implemented only by the following two methods. These methods suppose
    //! that the class object has been trained already. Public match methods call these methods
    //! after calling train().
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;

    static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx );
    static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx );

    static Mat clone_op( Mat m ) { return m.clone(); }
    void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const;

    //! Collection of descriptors from train images.
    std::vector<Mat> trainDescCollection;
    std::vector<UMat> utrainDescCollection;
};

/** @brief Brute-force descriptor matcher.

For each descriptor in the first set, this matcher finds the closest descriptor in the second set
by trying each one. This descriptor matcher supports masking permissible matches of descriptor
sets.
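
A quick brute-force match of two float descriptor sets, sketched (cross-checking is one possible
validation strategy, not the only one):
@code{.cpp}
    cv::BFMatcher matcher(cv::NORM_L2, true);  // crossCheck enabled
    std::vector<cv::DMatch> matches;
    matcher.match(queryDescriptors, trainDescriptors, matches);
@endcode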
 */
class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
{
public:
    /** @brief Brute-force matcher constructor.

    @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
    preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
    BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see the ORB constructor
    description).
    @param crossCheck If it is false, this is the default BFMatcher behaviour: it finds the k
    nearest neighbours for each query descriptor. If crossCheck==true, then the knnMatch() method with
    k=1 will only return pairs (i,j) such that for the i-th query descriptor the j-th descriptor in the
    matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
    pairs. Such a technique usually produces the best results with a minimal number of outliers when
    there are enough matches. This is an alternative to the ratio test used by D. Lowe in the SIFT paper.
     */
    CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );

    virtual ~BFMatcher() {}

    virtual bool isMaskSupported() const { return true; }

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false );
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false );

    int normType;
    bool crossCheck;
};


/** @brief Flann-based descriptor matcher.

This matcher trains flann::Index_ on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher. FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this.
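
A typical use, sketched (float descriptors are assumed; binary descriptors require different
index parameters, e.g. flann::LshIndexParams):
@code{.cpp}
    cv::FlannBasedMatcher matcher;  // KD-tree index with default parameters
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(queryDescriptors, trainDescriptors, knn, 2);
    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); i++)  // Lowe's ratio test
        if (knn[i].size() == 2 && knn[i][0].distance < 0.7f * knn[i][1].distance)
            good.push_back(knn[i][0]);
@endcode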
 */
class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),
                       const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );

    virtual void add( InputArrayOfArrays descriptors );
    virtual void clear();

    // Reads matcher object from a file node
    virtual void read( const FileNode& );
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const;

    virtual void train();
    virtual bool isMaskSupported() const;

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const;
protected:
    static void convertToDMatches( const DescriptorCollection& descriptors,
                                   const Mat& indices, const Mat& distances,
                                   std::vector<std::vector<DMatch> >& matches );

    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false );
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false );

    Ptr<flann::IndexParams> indexParams;
    Ptr<flann::SearchParams> searchParams;
    Ptr<flann::Index> flannIndex;

    DescriptorCollection mergedDescriptors;
    int addedDescCount;
};

//! @} features2d_match

/****************************************************************************************\
*                                   Drawing functions                                     *
\****************************************************************************************/

//! @addtogroup features2d_draw
//! @{

struct CV_EXPORTS DrawMatchesFlags
{
    enum{ DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
                       //!< i.e. existing memory of output image may be reused.
                       //!< Two source images, matches and single keypoints will be drawn.
                       //!< For each keypoint only the center point will be drawn (without
                       //!< the circle around keypoint with keypoint size and orientation).
          DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
                                //!< Matches will be drawn on existing content of output image.
          NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
          DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and
                                  //!< orientation will be drawn.
        };
};

/** @brief Draws keypoints.

@param image Source image.
@param keypoints Keypoints from the source image.
@param outImage Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param color Color of keypoints.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags. See details above in drawMatches .

@note
For Python API, flags are modified as cv2.DRAW_MATCHES_FLAGS_DEFAULT,
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv2.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv2.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
 */
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                               const Scalar& color=Scalar::all(-1), int flags=DrawMatchesFlags::DEFAULT );

/** @brief Draws the found matches of keypoints from two images.

@param img1 First source image.
@param keypoints1 Keypoints from the first source image.
@param img2 Second source image.
@param keypoints2 Keypoints from the second source image.
@param matches1to2 Matches from the first image to the second one, which means that
keypoints1[matches1to2[i].queryIdx] has a corresponding point in keypoints2[matches1to2[i].trainIdx] .
@param outImg Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
, the color is generated randomly.
@param singlePointColor Color of single keypoints (circles), that is, keypoints that do not
have matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
drawn.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags.

This function draws matches of keypoints from two images in the output image. A match is a line
connecting two keypoints (circles). See cv::DrawMatchesFlags.
 */
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<char>& matchesMask=std::vector<char>(), int flags=DrawMatchesFlags::DEFAULT );

/** @overload */
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), int flags=DrawMatchesFlags::DEFAULT );
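
/* A drawing sketch (images, keypoints and matches are assumed to have been computed already,
   e.g. by a Feature2D instance and a DescriptorMatcher):

    cv::Mat vis;
    cv::drawMatches(img1, keypoints1, img2, keypoints2, matches, vis);
    cv::drawKeypoints(img1, keypoints1, vis, cv::Scalar::all(-1),
                      cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
*/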

//! @} features2d_draw

/****************************************************************************************\
*   Functions to evaluate the feature detectors and [generic] descriptor extractors      *
\****************************************************************************************/

CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
                                         std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
                                         float& repeatability, int& correspCount,
                                         const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );

CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,
                                             const std::vector<std::vector<uchar> >& correctMatches1to2Mask,
                                             std::vector<Point2f>& recallPrecisionCurve );

CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );

/****************************************************************************************\
*                                     Bag of visual words                                 *
\****************************************************************************************/

//! @addtogroup features2d_category
//! @{

/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.

For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004.
 */
class CV_EXPORTS_W BOWTrainer
{
public:
    BOWTrainer();
    virtual ~BOWTrainer();

    /** @brief Adds descriptors to a training set.

    @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
    descriptor.

    The training set is clustered using the cluster method to construct the vocabulary.
     */
    CV_WRAP void add( const Mat& descriptors );

    /** @brief Returns a training set of descriptors.
    */
    CV_WRAP const std::vector<Mat>& getDescriptors() const;

    /** @brief Returns the count of all descriptors stored in the training set.
    */
    CV_WRAP int descriptorsCount() const;

    CV_WRAP virtual void clear();

    /** @overload */
    CV_WRAP virtual Mat cluster() const = 0;

    /** @brief Clusters train descriptors.

    @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
    Descriptors are not added to the inner train descriptor set.

    The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
    variant of the method, train descriptors stored in the object are clustered. In the second variant,
    input descriptors are clustered.
     */
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;

protected:
    std::vector<Mat> descriptors;
    int size;
};

/** @brief kmeans-based class to train visual vocabulary using the *bag of visual words* approach.
 */
class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
{
public:
    /** @brief The constructor.

    @see cv::kmeans
     */
    CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
                      int attempts=3, int flags=KMEANS_PP_CENTERS );
    virtual ~BOWKMeansTrainer();

    // Returns trained vocabulary (i.e. cluster centers).
    CV_WRAP virtual Mat cluster() const;
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const;

protected:

    int clusterCount;
    TermCriteria termcrit;
    int attempts;
    int flags;
};

/** @brief Class to compute an image descriptor using the *bag of visual words*.

Such a computation consists of the following steps:

1.  Compute descriptors for a given image and its keypoints set.
2.  Find the nearest visual words from the vocabulary for each keypoint descriptor.
3.  Compute the bag-of-words image descriptor as a normalized histogram of vocabulary words
    encountered in the image. The i-th bin of the histogram is the frequency of the i-th word of
    the vocabulary in the given image.
 */
class CV_EXPORTS_W BOWImgDescriptorExtractor
{
public:
    /** @brief The constructor.

    @param dextractor Descriptor extractor that is used to compute descriptors for an input image and
    its keypoints.
    @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary
    for each keypoint descriptor of the image.
     */
    CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
                               const Ptr<DescriptorMatcher>& dmatcher );
    /** @overload */
    BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& dmatcher );
    virtual ~BOWImgDescriptorExtractor();

    /** @brief Sets a visual vocabulary.

    @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the
    vocabulary is a visual word (cluster center).
     */
    CV_WRAP void setVocabulary( const Mat& vocabulary );

    /** @brief Returns the set vocabulary.
    */
    CV_WRAP const Mat& getVocabulary() const;

    /** @brief Computes an image descriptor using the set visual vocabulary.

    @param image Image, for which the descriptor is computed.
    @param keypoints Keypoints detected in the input image.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i-th cluster (word of vocabulary),
    returned if it is non-zero.
    @param descriptors Descriptors of the image keypoints that are returned if they are non-zero.
     */
    void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
    /** @overload
    @param keypointDescriptors Computed descriptors to match with vocabulary.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i-th cluster (word of vocabulary),
    returned if it is non-zero.
     */
    void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
    // compute() is not constant because DescriptorMatcher::match is not constant

    CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
    { compute(image,keypoints,imgDescriptor); }

    /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.
    */
    CV_WRAP int descriptorSize() const;

    /** @brief Returns an image descriptor type.
     */
    CV_WRAP int descriptorType() const;

protected:
    Mat vocabulary;
    Ptr<DescriptorExtractor> dextractor;
    Ptr<DescriptorMatcher> dmatcher;
};

//! @} features2d_category

//! @} features2d

} /* namespace cv */

#endif