github.com/kaydxh/golang@v0.0.131/pkg/gocv/cgo/third_path/opencv4/include/opencv2/features2d.hpp (about)

     1  /*M///////////////////////////////////////////////////////////////////////////////////////
     2  //
     3  //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
     4  //
     5  //  By downloading, copying, installing or using the software you agree to this license.
     6  //  If you do not agree to this license, do not download, install,
     7  //  copy or use the software.
     8  //
     9  //
    10  //                           License Agreement
    11  //                For Open Source Computer Vision Library
    12  //
    13  // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
    14  // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
    15  // Third party copyrights are property of their respective owners.
    16  //
    17  // Redistribution and use in source and binary forms, with or without modification,
    18  // are permitted provided that the following conditions are met:
    19  //
    20  //   * Redistribution's of source code must retain the above copyright notice,
    21  //     this list of conditions and the following disclaimer.
    22  //
    23  //   * Redistribution's in binary form must reproduce the above copyright notice,
    24  //     this list of conditions and the following disclaimer in the documentation
    25  //     and/or other materials provided with the distribution.
    26  //
    27  //   * The name of the copyright holders may not be used to endorse or promote products
    28  //     derived from this software without specific prior written permission.
    29  //
    30  // This software is provided by the copyright holders and contributors "as is" and
    31  // any express or implied warranties, including, but not limited to, the implied
    32  // warranties of merchantability and fitness for a particular purpose are disclaimed.
    33  // In no event shall the Intel Corporation or contributors be liable for any direct,
    34  // indirect, incidental, special, exemplary, or consequential damages
    35  // (including, but not limited to, procurement of substitute goods or services;
    36  // loss of use, data, or profits; or business interruption) however caused
    37  // and on any theory of liability, whether in contract, strict liability,
    38  // or tort (including negligence or otherwise) arising in any way out of
    39  // the use of this software, even if advised of the possibility of such damage.
    40  //
    41  //M*/
    42  
    43  #ifndef OPENCV_FEATURES_2D_HPP
    44  #define OPENCV_FEATURES_2D_HPP
    45  
    46  #include "opencv2/opencv_modules.hpp"
    47  #include "opencv2/core.hpp"
    48  
    49  #ifdef HAVE_OPENCV_FLANN
    50  #include "opencv2/flann/miniflann.hpp"
    51  #endif
    52  
    53  /**
    54    @defgroup features2d 2D Features Framework
    55    @{
    56      @defgroup features2d_main Feature Detection and Description
    57      @defgroup features2d_match Descriptor Matchers
    58  
    59  Matchers of keypoint descriptors in OpenCV have wrappers with a common interface that enables you to
    60  easily switch between different algorithms solving the same problem. This section is devoted to
    61  matching descriptors that are represented as vectors in a multidimensional space. All objects that
    62  implement vector descriptor matchers inherit the DescriptorMatcher interface.
    63  
    64      @defgroup features2d_draw Drawing Function of Keypoints and Matches
    65      @defgroup features2d_category Object Categorization
    66  
    67  This section describes approaches based on local 2D features and used to categorize objects.
    68  
    69      @defgroup feature2d_hal Hardware Acceleration Layer
    70      @{
    71          @defgroup features2d_hal_interface Interface
    72      @}
    73    @}
    74   */
    75  
    76  namespace cv
    77  {
    78  
    79  //! @addtogroup features2d_main
    80  //! @{
    81  
    82  // //! writes vector of keypoints to the file storage
    83  // CV_EXPORTS void write(FileStorage& fs, const String& name, const std::vector<KeyPoint>& keypoints);
    84  // //! reads vector of keypoints from the specified file storage node
    85  // CV_EXPORTS void read(const FileNode& node, CV_OUT std::vector<KeyPoint>& keypoints);
    86  
    87  /** @brief A class that filters a vector of keypoints.
    88  
    89   Because it is difficult to provide a convenient interface for all usage scenarios of the
    90   keypoints filter class, it currently provides only the static methods that are needed.
    91   */
    92  class CV_EXPORTS KeyPointsFilter
    93  {
    94  public:
    95      KeyPointsFilter(){}
    96  
    97      /*
    98       * Remove keypoints within borderPixels of an image edge.
    99       */
   100      static void runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize );
   101      /*
   102       * Remove keypoints of sizes out of range.
   103       */
   104      static void runByKeypointSize( std::vector<KeyPoint>& keypoints, float minSize,
   105                                     float maxSize=FLT_MAX );
   106      /*
   107       * Remove keypoints from some image by mask for pixels of this image.
   108       */
   109      static void runByPixelsMask( std::vector<KeyPoint>& keypoints, const Mat& mask );
   110      /*
   111       * Remove duplicated keypoints.
   112       */
   113      static void removeDuplicated( std::vector<KeyPoint>& keypoints );
   114      /*
   115       * Remove duplicated keypoints and sort the remaining keypoints
   116       */
   117      static void removeDuplicatedSorted( std::vector<KeyPoint>& keypoints );
   118  
   119      /*
   120       * Retain the specified number of the best keypoints (according to the response)
   121       */
   122      static void retainBest( std::vector<KeyPoint>& keypoints, int npoints );
   123  };
   124  
   125  
   126  /************************************ Base Classes ************************************/
   127  
   128  /** @brief Abstract base class for 2D image feature detectors and descriptor extractors
   129  */
   130  #ifdef __EMSCRIPTEN__
   131  class CV_EXPORTS_W Feature2D : public Algorithm
   132  #else
   133  class CV_EXPORTS_W Feature2D : public virtual Algorithm
   134  #endif
   135  {
   136  public:
   137      virtual ~Feature2D();
   138  
   139      /** @brief Detects keypoints in an image (first variant) or image set (second variant).
   140  
   141      @param image Image.
   142      @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
   143      of keypoints detected in images[i] .
   144      @param mask Mask specifying where to look for keypoints (optional). It must be an 8-bit integer
   145      matrix with non-zero values in the region of interest.
   146       */
   147      CV_WRAP virtual void detect( InputArray image,
   148                                   CV_OUT std::vector<KeyPoint>& keypoints,
   149                                   InputArray mask=noArray() );
   150  
   151      /** @overload
   152      @param images Image set.
   153      @param keypoints The detected keypoints. In the second variant of the method keypoints[i] is a set
   154      of keypoints detected in images[i] .
   155      @param masks Masks for each input image specifying where to look for keypoints (optional).
   156      masks[i] is a mask for images[i].
   157      */
   158      CV_WRAP virtual void detect( InputArrayOfArrays images,
   159                           CV_OUT std::vector<std::vector<KeyPoint> >& keypoints,
   160                           InputArrayOfArrays masks=noArray() );
   161  
   162      /** @brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set
   163      (second variant).
   164  
   165      @param image Image.
   166      @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
   167      computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint
   168      with several dominant orientations (for each orientation).
   169      @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
   170      descriptors computed for keypoints[i]. Row j in descriptors (or descriptors[i]) is the
   171      descriptor for the j-th keypoint.
   172       */
   173      CV_WRAP virtual void compute( InputArray image,
   174                                    CV_OUT CV_IN_OUT std::vector<KeyPoint>& keypoints,
   175                                    OutputArray descriptors );
   176  
   177      /** @overload
   178  
   179      @param images Image set.
   180      @param keypoints Input collection of keypoints. Keypoints for which a descriptor cannot be
   181      computed are removed. Sometimes new keypoints can be added, for example: SIFT duplicates keypoint
   182      with several dominant orientations (for each orientation).
   183      @param descriptors Computed descriptors. In the second variant of the method descriptors[i] are
   184      descriptors computed for keypoints[i]. Row j in descriptors (or descriptors[i]) is the
   185      descriptor for the j-th keypoint.
   186      */
   187      CV_WRAP virtual void compute( InputArrayOfArrays images,
   188                            CV_OUT CV_IN_OUT std::vector<std::vector<KeyPoint> >& keypoints,
   189                            OutputArrayOfArrays descriptors );
   190  
   191      /** Detects keypoints and computes the descriptors */
   192      CV_WRAP virtual void detectAndCompute( InputArray image, InputArray mask,
   193                                             CV_OUT std::vector<KeyPoint>& keypoints,
   194                                             OutputArray descriptors,
   195                                             bool useProvidedKeypoints=false );
   196  
   197      CV_WRAP virtual int descriptorSize() const;
   198      CV_WRAP virtual int descriptorType() const;
   199      CV_WRAP virtual int defaultNorm() const;
   200  
   201      CV_WRAP void write( const String& fileName ) const;
   202  
   203      CV_WRAP void read( const String& fileName );
   204  
   205      virtual void write( FileStorage&) const CV_OVERRIDE;
   206  
   207      // see corresponding cv::Algorithm method
   208      CV_WRAP virtual void read( const FileNode&) CV_OVERRIDE;
   209  
   210      //! Return true if detector object is empty
   211      CV_WRAP virtual bool empty() const CV_OVERRIDE;
   212      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   213  
   214      // see corresponding cv::Algorithm method
   215      CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }
   216  };
   217  
   218  /** Feature detectors in OpenCV have wrappers with a common interface that enables you to easily switch
   219  between different algorithms solving the same problem. All objects that implement keypoint detectors
   220  inherit the FeatureDetector interface. */
   221  typedef Feature2D FeatureDetector;
   222  
   223  /** Extractors of keypoint descriptors in OpenCV have wrappers with a common interface that enables you
   224  to easily switch between different algorithms solving the same problem. This section is devoted to
   225  computing descriptors represented as vectors in a multidimensional space. All objects that implement
   226  the vector descriptor extractors inherit the DescriptorExtractor interface.
   227   */
   228  typedef Feature2D DescriptorExtractor;
   229  
   230  
   231  /** @brief Class for implementing the wrapper which makes detectors and extractors affine invariant,
   232  described as ASIFT in @cite YM11 .
   233  */
   234  class CV_EXPORTS_W AffineFeature : public Feature2D
   235  {
   236  public:
   237      /**
   238      @param backend The detector/extractor you want to use as backend.
   239      @param maxTilt The highest power index of tilt factor. 5 is used in the paper as tilt sampling range n.
   240      @param minTilt The lowest power index of tilt factor. 0 is used in the paper.
   241      @param tiltStep Tilt sampling step \f$\delta_t\f$ in Algorithm 1 in the paper.
   242      @param rotateStepBase Rotation sampling step factor b in Algorithm 1 in the paper.
   243      */
   244      CV_WRAP static Ptr<AffineFeature> create(const Ptr<Feature2D>& backend,
   245          int maxTilt = 5, int minTilt = 0, float tiltStep = 1.4142135623730951f, float rotateStepBase = 72);
   246  
   247      CV_WRAP virtual void setViewParams(const std::vector<float>& tilts, const std::vector<float>& rolls) = 0;
   248      CV_WRAP virtual void getViewParams(std::vector<float>& tilts, std::vector<float>& rolls) const = 0;
   249      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   250  };
   251  
   252  typedef AffineFeature AffineFeatureDetector;
   253  typedef AffineFeature AffineDescriptorExtractor;
   254  
   255  
   256  /** @brief Class for extracting keypoints and computing descriptors using the Scale Invariant Feature Transform
   257  (SIFT) algorithm by D. Lowe @cite Lowe04 .
   258  */
   259  class CV_EXPORTS_W SIFT : public Feature2D
   260  {
   261  public:
   262      /**
   263      @param nfeatures The number of best features to retain. The features are ranked by their scores
   264      (measured in SIFT algorithm as the local contrast)
   265  
   266      @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The
   267      number of octaves is computed automatically from the image resolution.
   268  
   269      @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform
   270      (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
   271  
   272      @note The contrast threshold will be divided by nOctaveLayers when the filtering is applied. When
   273      nOctaveLayers is set to default and if you want to use the value used in D. Lowe paper, 0.03, set
   274      this argument to 0.09.
   275  
   276      @param edgeThreshold The threshold used to filter out edge-like features. Note that its meaning
   277      is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are
   278      filtered out (more features are retained).
   279  
   280      @param sigma The sigma of the Gaussian applied to the input image at the octave \#0. If your image
   281      is captured with a weak camera with soft lenses, you might want to reduce the number.
   282      */
   283      CV_WRAP static Ptr<SIFT> create(int nfeatures = 0, int nOctaveLayers = 3,
   284          double contrastThreshold = 0.04, double edgeThreshold = 10,
   285          double sigma = 1.6);
   286  
   287      /** @brief Create SIFT with specified descriptorType.
   288      @param nfeatures The number of best features to retain. The features are ranked by their scores
   289      (measured in SIFT algorithm as the local contrast)
   290  
   291      @param nOctaveLayers The number of layers in each octave. 3 is the value used in D. Lowe paper. The
   292      number of octaves is computed automatically from the image resolution.
   293  
   294      @param contrastThreshold The contrast threshold used to filter out weak features in semi-uniform
   295      (low-contrast) regions. The larger the threshold, the less features are produced by the detector.
   296  
   297      @note The contrast threshold will be divided by nOctaveLayers when the filtering is applied. When
   298      nOctaveLayers is set to default and if you want to use the value used in D. Lowe paper, 0.03, set
   299      this argument to 0.09.
   300  
   301      @param edgeThreshold The threshold used to filter out edge-like features. Note that its meaning
   302      is different from the contrastThreshold, i.e. the larger the edgeThreshold, the less features are
   303      filtered out (more features are retained).
   304  
   305      @param sigma The sigma of the Gaussian applied to the input image at the octave \#0. If your image
   306      is captured with a weak camera with soft lenses, you might want to reduce the number.
   307  
   308      @param descriptorType The type of descriptors. Only CV_32F and CV_8U are supported.
   309      */
   310      CV_WRAP static Ptr<SIFT> create(int nfeatures, int nOctaveLayers,
   311          double contrastThreshold, double edgeThreshold,
   312          double sigma, int descriptorType);
   313  
   314      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   315  };
   316  
   317  typedef SIFT SiftFeatureDetector;
   318  typedef SIFT SiftDescriptorExtractor;
   319  
   320  
   321  /** @brief Class implementing the BRISK keypoint detector and descriptor extractor, described in @cite LCS11 .
   322   */
   323  class CV_EXPORTS_W BRISK : public Feature2D
   324  {
   325  public:
   326      /** @brief The BRISK constructor
   327  
   328      @param thresh AGAST detection threshold score.
   329      @param octaves detection octaves. Use 0 to do single scale.
   330      @param patternScale apply this scale to the pattern used for sampling the neighbourhood of a
   331      keypoint.
   332       */
   333      CV_WRAP static Ptr<BRISK> create(int thresh=30, int octaves=3, float patternScale=1.0f);
   334  
   335      /** @brief The BRISK constructor for a custom pattern
   336  
   337      @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
   338      keypoint scale 1).
   339      @param numberList defines the number of sampling points on the sampling circle. Must be the same
   340      size as radiusList.
   341      @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
   342      scale 1).
   343      @param dMin threshold for the long pairings used for orientation determination (in pixels for
   344      keypoint scale 1).
   345      @param indexChange index remapping of the bits. */
   346      CV_WRAP static Ptr<BRISK> create(const std::vector<float> &radiusList, const std::vector<int> &numberList,
   347          float dMax=5.85f, float dMin=8.2f, const std::vector<int>& indexChange=std::vector<int>());
   348  
   349      /** @brief The BRISK constructor for a custom pattern, detection threshold and octaves
   350  
   351      @param thresh AGAST detection threshold score.
   352      @param octaves detection octaves. Use 0 to do single scale.
   353      @param radiusList defines the radii (in pixels) where the samples around a keypoint are taken (for
   354      keypoint scale 1).
   355      @param numberList defines the number of sampling points on the sampling circle. Must be the same
   356      size as radiusList.
   357      @param dMax threshold for the short pairings used for descriptor formation (in pixels for keypoint
   358      scale 1).
   359      @param dMin threshold for the long pairings used for orientation determination (in pixels for
   360      keypoint scale 1).
   361      @param indexChange index remapping of the bits. */
   362      CV_WRAP static Ptr<BRISK> create(int thresh, int octaves, const std::vector<float> &radiusList,
   363          const std::vector<int> &numberList, float dMax=5.85f, float dMin=8.2f,
   364          const std::vector<int>& indexChange=std::vector<int>());
   365      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   366  
   367      /** @brief Set detection threshold.
   368      @param threshold AGAST detection threshold score.
   369      */
   370      CV_WRAP virtual void setThreshold(int threshold) { CV_UNUSED(threshold); return; }
   371      CV_WRAP virtual int getThreshold() const { return -1; } // base stub: returns -1 unless overridden
   372  
   373      /** @brief Set detection octaves.
   374      @param octaves detection octaves. Use 0 to do single scale.
   375      */
   376      CV_WRAP virtual void setOctaves(int octaves) { CV_UNUSED(octaves); return; }
   377      CV_WRAP virtual int getOctaves() const { return -1; } // base stub: returns -1 unless overridden
   378  };
   379  
   380  /** @brief Class implementing the ORB (*oriented BRIEF*) keypoint detector and descriptor extractor
   381  
   382  described in @cite RRKB11 . The algorithm uses FAST in pyramids to detect stable keypoints, selects
   383  the strongest features using FAST or Harris response, finds their orientation using first-order
   384  moments and computes the descriptors using BRIEF (where the coordinates of random point pairs (or
   385  k-tuples) are rotated according to the measured orientation).
   386   */
   387  class CV_EXPORTS_W ORB : public Feature2D
   388  {
   389  public:
   390      enum ScoreType { HARRIS_SCORE=0, FAST_SCORE=1 };
   391      static const int kBytes = 32;
   392  
   393      /** @brief The ORB constructor
   394  
   395      @param nfeatures The maximum number of features to retain.
   396      @param scaleFactor Pyramid decimation ratio, greater than 1. scaleFactor==2 means the classical
   397      pyramid, where each next level has 4x less pixels than the previous, but such a big scale factor
   398      will degrade feature matching scores dramatically. On the other hand, too close to 1 scale factor
   399      will mean that to cover certain scale range you will need more pyramid levels and so the speed
   400      will suffer.
   401      @param nlevels The number of pyramid levels. The smallest level will have linear size equal to
   402      input_image_linear_size/pow(scaleFactor, nlevels - firstLevel).
   403      @param edgeThreshold This is the size of the border where the features are not detected. It should
   404      roughly match the patchSize parameter.
   405      @param firstLevel The level of pyramid to put source image to. Previous layers are filled
   406      with upscaled source image.
   407      @param WTA_K The number of points that produce each element of the oriented BRIEF descriptor. The
   408      default value 2 means the BRIEF where we take a random point pair and compare their brightnesses,
   409      so we get 0/1 response. Other possible values are 3 and 4. For example, 3 means that we take 3
   410      random points (of course, those point coordinates are random, but they are generated from the
   411      pre-defined seed, so each element of BRIEF descriptor is computed deterministically from the pixel
   412      rectangle), find point of maximum brightness and output index of the winner (0, 1 or 2). Such
   413      output will occupy 2 bits, and therefore it will need a special variant of Hamming distance,
   414      denoted as NORM_HAMMING2 (2 bits per bin). When WTA_K=4, we take 4 random points to compute each
   415      bin (that will also occupy 2 bits with possible values 0, 1, 2 or 3).
   416      @param scoreType The default HARRIS_SCORE means that Harris algorithm is used to rank features
   417      (the score is written to KeyPoint::score and is used to retain best nfeatures features);
   418      FAST_SCORE is alternative value of the parameter that produces slightly less stable keypoints,
   419      but it is a little faster to compute.
   420      @param patchSize size of the patch used by the oriented BRIEF descriptor. Of course, on smaller
   421      pyramid layers the perceived image area covered by a feature will be larger.
   422      @param fastThreshold the fast threshold
   423       */
   424      CV_WRAP static Ptr<ORB> create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,
   425          int firstLevel=0, int WTA_K=2, ORB::ScoreType scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);
   426  
   427      CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
   428      CV_WRAP virtual int getMaxFeatures() const = 0;
   429  
   430      CV_WRAP virtual void setScaleFactor(double scaleFactor) = 0;
   431      CV_WRAP virtual double getScaleFactor() const = 0;
   432  
   433      CV_WRAP virtual void setNLevels(int nlevels) = 0;
   434      CV_WRAP virtual int getNLevels() const = 0;
   435  
   436      CV_WRAP virtual void setEdgeThreshold(int edgeThreshold) = 0;
   437      CV_WRAP virtual int getEdgeThreshold() const = 0;
   438  
   439      CV_WRAP virtual void setFirstLevel(int firstLevel) = 0;
   440      CV_WRAP virtual int getFirstLevel() const = 0;
   441  
   442      CV_WRAP virtual void setWTA_K(int wta_k) = 0;
   443      CV_WRAP virtual int getWTA_K() const = 0;
   444  
   445      CV_WRAP virtual void setScoreType(ORB::ScoreType scoreType) = 0;
   446      CV_WRAP virtual ORB::ScoreType getScoreType() const = 0;
   447  
   448      CV_WRAP virtual void setPatchSize(int patchSize) = 0;
   449      CV_WRAP virtual int getPatchSize() const = 0;
   450  
   451      CV_WRAP virtual void setFastThreshold(int fastThreshold) = 0;
   452      CV_WRAP virtual int getFastThreshold() const = 0;
   453      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   454  };
   455  
   456  /** @brief Maximally stable extremal region extractor
   457  
   458  The class encapsulates all the parameters of the %MSER extraction algorithm (see [wiki
   459  article](http://en.wikipedia.org/wiki/Maximally_stable_extremal_regions)).
   460  
   461  - there are two different implementations of %MSER: one for grey image, one for color image
   462  
   463  - the grey image algorithm is taken from: @cite nister2008linear ;  the paper claims to be faster
   464  than union-find method; it actually gets 1.5~2m/s on my centrino L7200 1.2GHz laptop.
   465  
   466  - the color image algorithm is taken from: @cite forssen2007maximally ; it should be much slower
   467  than grey image method ( 3~4 times )
   468  
   469  - (Python) A complete example showing the use of the %MSER detector can be found at samples/python/mser.py
   470  */
   471  class CV_EXPORTS_W MSER : public Feature2D
   472  {
   473  public:
   474      /** @brief Full constructor for %MSER detector
   475  
   476      @param delta it compares \f$(size_{i}-size_{i-delta})/size_{i-delta}\f$
   477      @param min_area prune the area which is smaller than min_area
   478      @param max_area prune the area which is bigger than max_area
   479      @param max_variation prune the area that has similar size to its children
   480      @param min_diversity for color image, trace back to cut off mser with diversity less than min_diversity
   481      @param max_evolution  for color image, the evolution steps
   482      @param area_threshold for color image, the area threshold to cause re-initialize
   483      @param min_margin for color image, ignore too small margin
   484      @param edge_blur_size for color image, the aperture size for edge blur
   485       */
   486      CV_WRAP static Ptr<MSER> create( int delta=5, int min_area=60, int max_area=14400,
   487            double max_variation=0.25, double min_diversity=.2,
   488            int max_evolution=200, double area_threshold=1.01,
   489            double min_margin=0.003, int edge_blur_size=5 );
   490  
   491      /** @brief Detect %MSER regions
   492  
   493      @param image input image (8UC1, 8UC3 or 8UC4, must be greater than or equal to 3x3)
   494      @param msers resulting list of point sets
   495      @param bboxes resulting bounding boxes
   496      */
   497      CV_WRAP virtual void detectRegions( InputArray image,
   498                                          CV_OUT std::vector<std::vector<Point> >& msers,
   499                                          CV_OUT std::vector<Rect>& bboxes ) = 0;
   500  
   501      CV_WRAP virtual void setDelta(int delta) = 0;
   502      CV_WRAP virtual int getDelta() const = 0;
   503  
   504      CV_WRAP virtual void setMinArea(int minArea) = 0;
   505      CV_WRAP virtual int getMinArea() const = 0;
   506  
   507      CV_WRAP virtual void setMaxArea(int maxArea) = 0;
   508      CV_WRAP virtual int getMaxArea() const = 0;
   509  
   510      CV_WRAP virtual void setPass2Only(bool f) = 0;
   511      CV_WRAP virtual bool getPass2Only() const = 0;
   512      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   513  };
   514  
   515  //! @} features2d_main
   516  
   517  //! @addtogroup features2d_main
   518  //! @{
   519  
   520  /** @brief Wrapping class for feature detection using the FAST method.
   521   */
   522  class CV_EXPORTS_W FastFeatureDetector : public Feature2D
   523  {
   524  public:
   525      enum DetectorType
   526      {
   527          TYPE_5_8 = 0, TYPE_7_12 = 1, TYPE_9_16 = 2
   528      };
   529      enum
   530      {
   531          THRESHOLD = 10000, NONMAX_SUPPRESSION=10001, FAST_N=10002
   532      };
   533  
   534  
   535      CV_WRAP static Ptr<FastFeatureDetector> create( int threshold=10,
   536                                                      bool nonmaxSuppression=true,
   537                                                      FastFeatureDetector::DetectorType type=FastFeatureDetector::TYPE_9_16 );
   538  
   539      CV_WRAP virtual void setThreshold(int threshold) = 0;
   540      CV_WRAP virtual int getThreshold() const = 0;
   541  
   542      CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
   543      CV_WRAP virtual bool getNonmaxSuppression() const = 0;
   544  
   545      CV_WRAP virtual void setType(FastFeatureDetector::DetectorType type) = 0;
   546      CV_WRAP virtual FastFeatureDetector::DetectorType getType() const = 0;
   547      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   548  };
   549  
   550  /** @overload */
   551  CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
   552                        int threshold, bool nonmaxSuppression=true );
   553  
   554  /** @brief Detects corners using the FAST algorithm
   555  
   556  @param image grayscale image where keypoints (corners) are detected.
   557  @param keypoints keypoints detected on the image.
   558  @param threshold threshold on difference between intensity of the central pixel and pixels of a
   559  circle around this pixel.
   560  @param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
   561  (keypoints).
   562  @param type one of the three neighborhoods as defined in the paper:
   563  FastFeatureDetector::TYPE_9_16, FastFeatureDetector::TYPE_7_12,
   564  FastFeatureDetector::TYPE_5_8
   565  
   566  Detects corners using the FAST algorithm by @cite Rosten06 .
   567  
   568  @note In the Python API, types are given as cv.FAST_FEATURE_DETECTOR_TYPE_5_8,
   569  cv.FAST_FEATURE_DETECTOR_TYPE_7_12 and cv.FAST_FEATURE_DETECTOR_TYPE_9_16. For corner
   570  detection, use cv.FAST.detect() method.
   571   */
   572  CV_EXPORTS void FAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
   573                        int threshold, bool nonmaxSuppression, FastFeatureDetector::DetectorType type );
   574  
   575  //! @} features2d_main
   576  
   577  //! @addtogroup features2d_main
   578  //! @{
   579  
   580  /** @brief Wrapping class for feature detection using the AGAST method. :
   581   */
   582  class CV_EXPORTS_W AgastFeatureDetector : public Feature2D
   583  {
   584  public:
   585      enum DetectorType
   586      {
   587          AGAST_5_8 = 0, AGAST_7_12d = 1, AGAST_7_12s = 2, OAST_9_16 = 3,
   588      };
   589  
   590      enum
   591      {
   592          THRESHOLD = 10000, NONMAX_SUPPRESSION = 10001,
   593      };
   594  
   595      CV_WRAP static Ptr<AgastFeatureDetector> create( int threshold=10,
   596                                                       bool nonmaxSuppression=true,
   597                                                       AgastFeatureDetector::DetectorType type = AgastFeatureDetector::OAST_9_16);
   598  
   599      CV_WRAP virtual void setThreshold(int threshold) = 0;
   600      CV_WRAP virtual int getThreshold() const = 0;
   601  
   602      CV_WRAP virtual void setNonmaxSuppression(bool f) = 0;
   603      CV_WRAP virtual bool getNonmaxSuppression() const = 0;
   604  
   605      CV_WRAP virtual void setType(AgastFeatureDetector::DetectorType type) = 0;
   606      CV_WRAP virtual AgastFeatureDetector::DetectorType getType() const = 0;
   607      CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
   608  };
   609  
/** @overload
Variant without the explicit neighborhood @p type argument (presumably the
detector's default neighborhood is used — see AgastFeatureDetector::create). */
CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                      int threshold, bool nonmaxSuppression=true );

/** @brief Detects corners using the AGAST algorithm

@param image grayscale image where keypoints (corners) are detected.
@param keypoints keypoints detected on the image.
@param threshold threshold on difference between intensity of the central pixel and pixels of a
circle around this pixel.
@param nonmaxSuppression if true, non-maximum suppression is applied to detected corners
(keypoints).
@param type one of the four neighborhoods as defined in the paper:
AgastFeatureDetector::AGAST_5_8, AgastFeatureDetector::AGAST_7_12d,
AgastFeatureDetector::AGAST_7_12s, AgastFeatureDetector::OAST_9_16

For non-Intel platforms, there is a tree optimised variant of AGAST with same numerical results.
The 32-bit binary tree tables were generated automatically from original code using perl script.
The perl script and examples of tree generation are placed in features2d/doc folder.
Detects corners using the AGAST algorithm by @cite mair2010_agast .

 */
CV_EXPORTS void AGAST( InputArray image, CV_OUT std::vector<KeyPoint>& keypoints,
                      int threshold, bool nonmaxSuppression, AgastFeatureDetector::DetectorType type );
   634  
/** @brief Wrapping class for feature detection using the goodFeaturesToTrack function. :
 */
class CV_EXPORTS_W GFTTDetector : public Feature2D
{
public:
    /** @brief Creates a GFTT detector without an explicit gradient aperture argument. */
    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners=1000, double qualityLevel=0.01, double minDistance=1,
                                             int blockSize=3, bool useHarrisDetector=false, double k=0.04 );
    /** @brief Overload taking an explicit gradient aperture size.
    NOTE(review): "gradiantSize" is a misspelling of "gradientSize"; it is kept
    as-is because CV_WRAP exposes parameter names to binding callers. */
    CV_WRAP static Ptr<GFTTDetector> create( int maxCorners, double qualityLevel, double minDistance,
                                             int blockSize, int gradiantSize, bool useHarrisDetector=false, double k=0.04 );
    CV_WRAP virtual void setMaxFeatures(int maxFeatures) = 0;
    CV_WRAP virtual int getMaxFeatures() const = 0;

    CV_WRAP virtual void setQualityLevel(double qlevel) = 0;
    CV_WRAP virtual double getQualityLevel() const = 0;

    CV_WRAP virtual void setMinDistance(double minDistance) = 0;
    CV_WRAP virtual double getMinDistance() const = 0;

    CV_WRAP virtual void setBlockSize(int blockSize) = 0;
    CV_WRAP virtual int getBlockSize() const = 0;

    CV_WRAP virtual void setHarrisDetector(bool val) = 0;
    CV_WRAP virtual bool getHarrisDetector() const = 0;

    //! Free parameter of the Harris detector (used only when HarrisDetector is enabled).
    CV_WRAP virtual void setK(double k) = 0;
    CV_WRAP virtual double getK() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};
   663  
/** @brief Class for extracting blobs from an image. :

The class implements a simple algorithm for extracting blobs from an image:

1.  Convert the source image to binary images by applying thresholding with several thresholds from
    minThreshold (inclusive) to maxThreshold (exclusive) with distance thresholdStep between
    neighboring thresholds.
2.  Extract connected components from every binary image by findContours and calculate their
    centers.
3.  Group centers from several binary images by their coordinates. Close centers form one group that
    corresponds to one blob, which is controlled by the minDistBetweenBlobs parameter.
4.  From the groups, estimate final centers of blobs and their radiuses and return as locations and
    sizes of keypoints.

This class performs several filtrations of returned blobs. You should set filterBy\* to true/false
to turn on/off corresponding filtration. Available filtrations:

-   **By color**. This filter compares the intensity of a binary image at the center of a blob to
blobColor. If they differ, the blob is filtered out. Use blobColor = 0 to extract dark blobs
and blobColor = 255 to extract light blobs.
-   **By area**. Extracted blobs have an area between minArea (inclusive) and maxArea (exclusive).
-   **By circularity**. Extracted blobs have circularity
(\f$\frac{4*\pi*Area}{perimeter * perimeter}\f$) between minCircularity (inclusive) and
maxCircularity (exclusive).
-   **By ratio of the minimum inertia to maximum inertia**. Extracted blobs have this ratio
between minInertiaRatio (inclusive) and maxInertiaRatio (exclusive).
-   **By convexity**. Extracted blobs have convexity (area / area of blob convex hull) between
minConvexity (inclusive) and maxConvexity (exclusive).

Default values of parameters are tuned to extract dark circular blobs.
 */
class CV_EXPORTS_W SimpleBlobDetector : public Feature2D
{
public:
  //! Detection parameters; see the algorithm steps and filter descriptions above.
  struct CV_EXPORTS_W_SIMPLE Params
  {
      CV_WRAP Params();
      CV_PROP_RW float thresholdStep;       //!< distance between neighboring thresholds (step 1)
      CV_PROP_RW float minThreshold;        //!< first threshold, inclusive (step 1)
      CV_PROP_RW float maxThreshold;        //!< last threshold, exclusive (step 1)
      CV_PROP_RW size_t minRepeatability;   //!< presumably the min number of binary images a center must appear in — TODO confirm
      CV_PROP_RW float minDistBetweenBlobs; //!< centers closer than this are merged into one blob (step 3)

      CV_PROP_RW bool filterByColor;        //!< enable the "by color" filter
      CV_PROP_RW uchar blobColor;           //!< 0 extracts dark blobs, 255 extracts light blobs

      CV_PROP_RW bool filterByArea;         //!< enable the "by area" filter
      CV_PROP_RW float minArea, maxArea;    //!< area range: [minArea, maxArea)

      CV_PROP_RW bool filterByCircularity;  //!< enable the "by circularity" filter
      CV_PROP_RW float minCircularity, maxCircularity; //!< circularity range: [min, max)

      CV_PROP_RW bool filterByInertia;      //!< enable the "by inertia ratio" filter
      CV_PROP_RW float minInertiaRatio, maxInertiaRatio; //!< inertia-ratio range: [min, max)

      CV_PROP_RW bool filterByConvexity;    //!< enable the "by convexity" filter
      CV_PROP_RW float minConvexity, maxConvexity; //!< convexity range: [min, max)

      //! (De)serialize the parameter set with OpenCV's FileStorage mechanism.
      void read( const FileNode& fn );
      void write( FileStorage& fs ) const;
  };

  CV_WRAP static Ptr<SimpleBlobDetector>
    create(const SimpleBlobDetector::Params &parameters = SimpleBlobDetector::Params());
  CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};
   730  
   731  //! @} features2d_main
   732  
   733  //! @addtogroup features2d_main
   734  //! @{
   735  
/** @brief Class implementing the KAZE keypoint detector and descriptor extractor, described in @cite ABD12 .

@note AKAZE descriptor can only be used with KAZE or AKAZE keypoints .. [ABD12] KAZE Features. Pablo
F. Alcantarilla, Adrien Bartoli and Andrew J. Davison. In European Conference on Computer Vision
(ECCV), Florence, Italy, October 2012.
*/
class CV_EXPORTS_W KAZE : public Feature2D
{
public:
    //! Conductivity (diffusivity) function used to build the nonlinear scale space;
    //! "PM" stands for Perona-Malik (see @cite ABD12 for the exact formulations).
    enum DiffusivityType
    {
        DIFF_PM_G1 = 0,      //!< Perona-Malik g1
        DIFF_PM_G2 = 1,      //!< Perona-Malik g2 (the default in create())
        DIFF_WEICKERT = 2,   //!< Weickert diffusivity
        DIFF_CHARBONNIER = 3 //!< Charbonnier diffusivity
    };

    /** @brief The KAZE constructor

    @param extended Set to enable extraction of extended (128-byte) descriptor.
    @param upright Set to enable use of upright descriptors (non rotation-invariant).
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
     */
    CV_WRAP static Ptr<KAZE> create(bool extended=false, bool upright=false,
                                    float threshold = 0.001f,
                                    int nOctaves = 4, int nOctaveLayers = 4,
                                    KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setExtended(bool extended) = 0;
    CV_WRAP virtual bool getExtended() const = 0;

    CV_WRAP virtual void setUpright(bool upright) = 0;
    CV_WRAP virtual bool getUpright() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    CV_WRAP virtual void setDiffusivity(KAZE::DiffusivityType diff) = 0;
    CV_WRAP virtual KAZE::DiffusivityType getDiffusivity() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};
   787  
/** @brief Class implementing the AKAZE keypoint detector and descriptor extractor, described in @cite ANB13.

@details AKAZE descriptors can only be used with KAZE or AKAZE keypoints. This class is thread-safe.

@note When you need descriptors use Feature2D::detectAndCompute, which
provides better performance. When using Feature2D::detect followed by
Feature2D::compute scale space pyramid is computed twice.

@note AKAZE implements T-API. When image is passed as UMat some parts of the algorithm
will use OpenCL.

@note [ANB13] Fast Explicit Diffusion for Accelerated Features in Nonlinear
Scale Spaces. Pablo F. Alcantarilla, Jesús Nuevo and Adrien Bartoli. In
British Machine Vision Conference (BMVC), Bristol, UK, September 2013.

*/
class CV_EXPORTS_W AKAZE : public Feature2D
{
public:
    // AKAZE descriptor type
    enum DescriptorType
    {
        DESCRIPTOR_KAZE_UPRIGHT = 2, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_KAZE = 3,         ///< Rotation-invariant KAZE descriptor
        DESCRIPTOR_MLDB_UPRIGHT = 4, ///< Upright descriptors, not invariant to rotation
        DESCRIPTOR_MLDB = 5          ///< Rotation-invariant MLDB descriptor (the default in create())
    };

    /** @brief The AKAZE constructor

    @param descriptor_type Type of the extracted descriptor: DESCRIPTOR_KAZE,
    DESCRIPTOR_KAZE_UPRIGHT, DESCRIPTOR_MLDB or DESCRIPTOR_MLDB_UPRIGHT.
    @param descriptor_size Size of the descriptor in bits. 0 -\> Full size
    @param descriptor_channels Number of channels in the descriptor (1, 2, 3)
    @param threshold Detector response threshold to accept point
    @param nOctaves Maximum octave evolution of the image
    @param nOctaveLayers Default number of sublevels per scale level
    @param diffusivity Diffusivity type. DIFF_PM_G1, DIFF_PM_G2, DIFF_WEICKERT or
    DIFF_CHARBONNIER
     */
    CV_WRAP static Ptr<AKAZE> create(AKAZE::DescriptorType descriptor_type = AKAZE::DESCRIPTOR_MLDB,
                                     int descriptor_size = 0, int descriptor_channels = 3,
                                     float threshold = 0.001f, int nOctaves = 4,
                                     int nOctaveLayers = 4, KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);

    CV_WRAP virtual void setDescriptorType(AKAZE::DescriptorType dtype) = 0;
    CV_WRAP virtual AKAZE::DescriptorType getDescriptorType() const = 0;

    CV_WRAP virtual void setDescriptorSize(int dsize) = 0;
    CV_WRAP virtual int getDescriptorSize() const = 0;

    CV_WRAP virtual void setDescriptorChannels(int dch) = 0;
    CV_WRAP virtual int getDescriptorChannels() const = 0;

    CV_WRAP virtual void setThreshold(double threshold) = 0;
    CV_WRAP virtual double getThreshold() const = 0;

    CV_WRAP virtual void setNOctaves(int octaves) = 0;
    CV_WRAP virtual int getNOctaves() const = 0;

    CV_WRAP virtual void setNOctaveLayers(int octaveLayers) = 0;
    CV_WRAP virtual int getNOctaveLayers() const = 0;

    //! Diffusivity of the underlying nonlinear scale space (shared with KAZE, hence the KAZE:: enum).
    CV_WRAP virtual void setDiffusivity(KAZE::DiffusivityType diff) = 0;
    CV_WRAP virtual KAZE::DiffusivityType getDiffusivity() const = 0;
    CV_WRAP virtual String getDefaultName() const CV_OVERRIDE;
};
   855  
   856  //! @} features2d_main
   857  
   858  /****************************************************************************************\
   859  *                                      Distance                                          *
   860  \****************************************************************************************/
   861  
   862  template<typename T>
   863  struct CV_EXPORTS Accumulator
   864  {
   865      typedef T Type;
   866  };
   867  
   868  template<> struct Accumulator<unsigned char>  { typedef float Type; };
   869  template<> struct Accumulator<unsigned short> { typedef float Type; };
   870  template<> struct Accumulator<char>   { typedef float Type; };
   871  template<> struct Accumulator<short>  { typedef float Type; };
   872  
   873  /*
   874   * Squared Euclidean distance functor
   875   */
   876  template<class T>
   877  struct CV_EXPORTS SL2
   878  {
   879      static const NormTypes normType = NORM_L2SQR;
   880      typedef T ValueType;
   881      typedef typename Accumulator<T>::Type ResultType;
   882  
   883      ResultType operator()( const T* a, const T* b, int size ) const
   884      {
   885          return normL2Sqr<ValueType, ResultType>(a, b, size);
   886      }
   887  };
   888  
   889  /*
   890   * Euclidean distance functor
   891   */
   892  template<class T>
   893  struct L2
   894  {
   895      static const NormTypes normType = NORM_L2;
   896      typedef T ValueType;
   897      typedef typename Accumulator<T>::Type ResultType;
   898  
   899      ResultType operator()( const T* a, const T* b, int size ) const
   900      {
   901          return (ResultType)std::sqrt((double)normL2Sqr<ValueType, ResultType>(a, b, size));
   902      }
   903  };
   904  
   905  /*
   906   * Manhattan distance (city block distance) functor
   907   */
   908  template<class T>
   909  struct L1
   910  {
   911      static const NormTypes normType = NORM_L1;
   912      typedef T ValueType;
   913      typedef typename Accumulator<T>::Type ResultType;
   914  
   915      ResultType operator()( const T* a, const T* b, int size ) const
   916      {
   917          return normL1<ValueType, ResultType>(a, b, size);
   918      }
   919  };
   920  
   921  /****************************************************************************************\
   922  *                                  DescriptorMatcher                                     *
   923  \****************************************************************************************/
   924  
   925  //! @addtogroup features2d_match
   926  //! @{
   927  
/** @brief Abstract base class for matching keypoint descriptors.

It has two groups of match methods: for matching descriptors of an image with another image or with
an image set.
 */
class CV_EXPORTS_W DescriptorMatcher : public Algorithm
{
public:
    //! Matcher backend identifiers accepted by create(const DescriptorMatcher::MatcherType&).
   enum MatcherType
    {
        FLANNBASED            = 1,
        BRUTEFORCE            = 2,
        BRUTEFORCE_L1         = 3,
        BRUTEFORCE_HAMMING    = 4,
        BRUTEFORCE_HAMMINGLUT = 5,
        BRUTEFORCE_SL2        = 6
    };

    virtual ~DescriptorMatcher();

    /** @brief Adds descriptors to train a CPU(trainDescCollectionis) or GPU(utrainDescCollectionis) descriptor
    collection.

    If the collection is not empty, the new descriptors are added to existing train descriptors.

    @param descriptors Descriptors to add. Each descriptors[i] is a set of descriptors from the same
    train image.
     */
    CV_WRAP virtual void add( InputArrayOfArrays descriptors );

    /** @brief Returns a constant link to the train descriptor collection trainDescCollection .
     */
    CV_WRAP const std::vector<Mat>& getTrainDescriptors() const;

    /** @brief Clears the train descriptor collections.
     */
    CV_WRAP virtual void clear() CV_OVERRIDE;

    /** @brief Returns true if there are no train descriptors in the both collections.
     */
    CV_WRAP virtual bool empty() const CV_OVERRIDE;

    /** @brief Returns true if the descriptor matcher supports masking permissible matches.
     */
    CV_WRAP virtual bool isMaskSupported() const = 0;

    /** @brief Trains a descriptor matcher

    Trains a descriptor matcher (for example, the flann index). In all methods to match, the method
    train() is run every time before matching. Some descriptor matchers (for example, BruteForceMatcher)
    have an empty implementation of this method. Other matchers really train their inner structures (for
    example, FlannBasedMatcher trains flann::Index ).
     */
    CV_WRAP virtual void train();

    /** @brief Finds the best match for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    In the first variant of this method, the train descriptors are passed as an input argument. In the
    second variant of the method, train descriptors collection that was set by DescriptorMatcher::add is
    used. Optional mask (or masks) can be passed to specify which query and training descriptors can be
    matched. Namely, queryDescriptors[i] can be matched with trainDescriptors[j] only if
    mask.at\<uchar\>(i,j) is non-zero.
     */
    CV_WRAP void match( InputArray queryDescriptors, InputArray trainDescriptors,
                CV_OUT std::vector<DMatch>& matches, InputArray mask=noArray() ) const;

    /** @brief Finds the k best matches for each descriptor from a query set.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.

    These extended variants of DescriptorMatcher::match methods find several best matches for each query
    descriptor. The matches are returned in the distance increasing order. See DescriptorMatcher::match
    for the details about query and train descriptors.
     */
    CV_WRAP void knnMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                   CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                   InputArray mask=noArray(), bool compactResult=false ) const;

    /** @brief For each query descriptor, finds the training descriptors not farther than the specified distance.

    @param queryDescriptors Query set of descriptors.
    @param trainDescriptors Train set of descriptors. This set is not added to the train descriptors
    collection stored in the class object.
    @param matches Found matches.
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in Pixels)!
    @param mask Mask specifying permissible matches between an input query and train matrices of
    descriptors.

    For each query descriptor, the methods find such training descriptors that the distance between the
    query descriptor and the training descriptor is equal or smaller than maxDistance. Found matches are
    returned in the distance increasing order.
     */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, InputArray trainDescriptors,
                      CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                      InputArray mask=noArray(), bool compactResult=false ) const;

    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. If a query descriptor is masked out in mask , no match is added for this
    descriptor. So, matches size may be smaller than the query descriptors count.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    */
    CV_WRAP void match( InputArray queryDescriptors, CV_OUT std::vector<DMatch>& matches,
                        InputArrayOfArrays masks=noArray() );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Matches. Each matches[i] is k or less matches for the same query descriptor.
    @param k Count of best matches found per each query descriptor or less if a query descriptor has
    less than k possible matches in total.
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void knnMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, int k,
                           InputArrayOfArrays masks=noArray(), bool compactResult=false );
    /** @overload
    @param queryDescriptors Query set of descriptors.
    @param matches Found matches.
    @param maxDistance Threshold for the distance between matched descriptors. Distance means here
    metric distance (e.g. Hamming distance), not the distance between coordinates (which is measured
    in Pixels)!
    @param masks Set of masks. Each masks[i] specifies permissible matches between the input query
    descriptors and stored train descriptors from the i-th image trainDescCollection[i].
    @param compactResult Parameter used when the mask (or masks) is not empty. If compactResult is
    false, the matches vector has the same size as queryDescriptors rows. If compactResult is true,
    the matches vector does not contain matches for fully masked-out query descriptors.
    */
    CV_WRAP void radiusMatch( InputArray queryDescriptors, CV_OUT std::vector<std::vector<DMatch> >& matches, float maxDistance,
                      InputArrayOfArrays masks=noArray(), bool compactResult=false );


    //! Convenience overload: serializes the matcher state to a file via FileStorage.
    //! NOTE(review): fs.isOpened() is not checked before writing — confirm whether
    //! an unopenable path should be reported to the caller.
    CV_WRAP void write( const String& fileName ) const
    {
        FileStorage fs(fileName, FileStorage::WRITE);
        write(fs);
    }

    //! Convenience overload: restores the matcher state from a file via FileStorage.
    CV_WRAP void read( const String& fileName )
    {
        FileStorage fs(fileName, FileStorage::READ);
        read(fs.root());
    }
    // Reads matcher object from a file node
    // see corresponding cv::Algorithm method
    CV_WRAP virtual void read( const FileNode& ) CV_OVERRIDE;
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const CV_OVERRIDE;

    /** @brief Clones the matcher.

    @param emptyTrainData If emptyTrainData is false, the method creates a deep copy of the object,
    that is, copies both parameters and train data. If emptyTrainData is true, the method creates an
    object copy with the current parameters but with empty train data.
     */
    CV_WRAP virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const = 0;

    /** @brief Creates a descriptor matcher of a given type with the default parameters (using default
    constructor).

    @param descriptorMatcherType Descriptor matcher type. Now the following matcher types are
    supported:
    -   `BruteForce` (it uses L2 )
    -   `BruteForce-L1`
    -   `BruteForce-Hamming`
    -   `BruteForce-Hamming(2)`
    -   `FlannBased`
     */
    CV_WRAP static Ptr<DescriptorMatcher> create( const String& descriptorMatcherType );

    //! @overload Creates a matcher from a MatcherType enum value instead of a string name.
    CV_WRAP static Ptr<DescriptorMatcher> create( const DescriptorMatcher::MatcherType& matcherType );


    // see corresponding cv::Algorithm method
    CV_WRAP inline void write(const Ptr<FileStorage>& fs, const String& name = String()) const { Algorithm::write(fs, name); }

protected:
    /**
     * Class to work with descriptors from several images as with one merged matrix.
     * It is used e.g. in FlannBasedMatcher.
     */
    class CV_EXPORTS DescriptorCollection
    {
    public:
        DescriptorCollection();
        DescriptorCollection( const DescriptorCollection& collection );
        virtual ~DescriptorCollection();

        // Vector of matrices "descriptors" will be merged to one matrix "mergedDescriptors" here.
        void set( const std::vector<Mat>& descriptors );
        virtual void clear();

        //! Access the merged matrix, or a single descriptor row by (image, local) or global index.
        const Mat& getDescriptors() const;
        const Mat getDescriptor( int imgIdx, int localDescIdx ) const;
        const Mat getDescriptor( int globalDescIdx ) const;
        //! Converts a global descriptor index back into (image index, local descriptor index).
        void getLocalIdx( int globalDescIdx, int& imgIdx, int& localDescIdx ) const;

        int size() const;

    protected:
        Mat mergedDescriptors;      //!< all train descriptors stacked into one matrix
        std::vector<int> startIdxs; //!< row offset of each source image's descriptors in mergedDescriptors
    };

    //! In fact the matching is implemented only by the following two methods. These methods suppose
    //! that the class object has been trained already. Public match methods call these methods
    //! after calling train().
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) = 0;

    //! Helpers for interpreting the optional match masks (see the match() documentation above).
    static bool isPossibleMatch( InputArray mask, int queryIdx, int trainIdx );
    static bool isMaskedOut( InputArrayOfArrays masks, int queryIdx );

    //! Returns a deep copy of m (used as a per-matrix copy operation).
    static Mat clone_op( Mat m ) { return m.clone(); }
    //! Validates user-supplied masks against the stored train collection — presumably
    //! throws on size/type mismatch; TODO confirm against the implementation.
    void checkMasks( InputArrayOfArrays masks, int queryDescriptorsCount ) const;

    //! Collection of descriptors from train images.
    std::vector<Mat> trainDescCollection;
    //! UMat counterpart of trainDescCollection (GPU/OpenCL path — see add()).
    std::vector<UMat> utrainDescCollection;
};
  1175  
/** @brief Brute-force descriptor matcher.

For each descriptor in the first set, this matcher finds the closest descriptor in the second set
by trying each one. This descriptor matcher supports masking permissible matches of descriptor
sets.
 */
class CV_EXPORTS_W BFMatcher : public DescriptorMatcher
{
public:
    /** @brief Brute-force matcher constructor (obsolete). Please use BFMatcher.create()
     *
     *
    */
    CV_WRAP BFMatcher( int normType=NORM_L2, bool crossCheck=false );

    virtual ~BFMatcher() {}

    //! BFMatcher always supports per-pair match masks.
    virtual bool isMaskSupported() const CV_OVERRIDE { return true; }

    /** @brief Brute-force matcher create method.
    @param normType One of NORM_L1, NORM_L2, NORM_HAMMING, NORM_HAMMING2. L1 and L2 norms are
    preferable choices for SIFT and SURF descriptors, NORM_HAMMING should be used with ORB, BRISK and
    BRIEF, NORM_HAMMING2 should be used with ORB when WTA_K==3 or 4 (see ORB::ORB constructor
    description).
    @param crossCheck If it is false, this is will be default BFMatcher behaviour when it finds the k
    nearest neighbors for each query descriptor. If crossCheck==true, then the knnMatch() method with
    k=1 will only return pairs (i,j) such that for i-th query descriptor the j-th descriptor in the
    matcher's collection is the nearest and vice versa, i.e. the BFMatcher will only return consistent
    pairs. Such technique usually produces best results with minimal number of outliers when there are
    enough matches. This is alternative to the ratio test, used by D. Lowe in SIFT paper.
     */
    CV_WRAP static Ptr<BFMatcher> create( int normType=NORM_L2, bool crossCheck=false ) ;

    //! Copies parameters and, unless emptyTrainData, the train descriptor collection.
    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const CV_OVERRIDE;
protected:
    //! See DescriptorMatcher: all public match methods funnel into these two implementations.
    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;

    int normType;    //!< distance norm selected at construction (see create())
    bool crossCheck; //!< if true, only mutually-nearest pairs are returned (see create())
};
  1219  
#if defined(HAVE_OPENCV_FLANN) || defined(CV_DOXYGEN)

/** @brief Flann-based descriptor matcher.

This matcher trains cv::flann::Index on a train descriptor collection and calls its nearest search
methods to find the best matches. So, this matcher may be faster when matching a large train
collection than the brute force matcher. FlannBasedMatcher does not support masking permissible
matches of descriptor sets because flann::Index does not support this. :
 */
class CV_EXPORTS_W FlannBasedMatcher : public DescriptorMatcher
{
public:
    /** @brief Constructor.
    @param indexParams parameters used to build the flann index (KD-tree forest by default).
    @param searchParams parameters of the nearest-neighbour search performed on the index.
    */
    CV_WRAP FlannBasedMatcher( const Ptr<flann::IndexParams>& indexParams=makePtr<flann::KDTreeIndexParams>(),
                       const Ptr<flann::SearchParams>& searchParams=makePtr<flann::SearchParams>() );

    virtual void add( InputArrayOfArrays descriptors ) CV_OVERRIDE;
    virtual void clear() CV_OVERRIDE;

    // Reads matcher object from a file node
    virtual void read( const FileNode& ) CV_OVERRIDE;
    // Writes matcher object to a file storage
    virtual void write( FileStorage& ) const CV_OVERRIDE;

    virtual void train() CV_OVERRIDE;
    //! Per-pair masks are not supported by flann::Index (see the class description).
    virtual bool isMaskSupported() const CV_OVERRIDE;

    CV_WRAP static Ptr<FlannBasedMatcher> create();

    virtual Ptr<DescriptorMatcher> clone( bool emptyTrainData=false ) const CV_OVERRIDE;
protected:
    // Converts flann search output (per-query row indices and distances) into DMatch vectors.
    static void convertToDMatches( const DescriptorCollection& descriptors,
                                   const Mat& indices, const Mat& distances,
                                   std::vector<std::vector<DMatch> >& matches );

    virtual void knnMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, int k,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;
    virtual void radiusMatchImpl( InputArray queryDescriptors, std::vector<std::vector<DMatch> >& matches, float maxDistance,
        InputArrayOfArrays masks=noArray(), bool compactResult=false ) CV_OVERRIDE;

    Ptr<flann::IndexParams> indexParams;   //!< index construction parameters
    Ptr<flann::SearchParams> searchParams; //!< search-time parameters
    Ptr<flann::Index> flannIndex;          //!< the flann index over the train descriptors

    DescriptorCollection mergedDescriptors; //!< train descriptors merged into a single collection
    int addedDescCount;                     //!< NOTE(review): presumably the number of descriptors
                                            //!< already added via add() — confirm in the implementation
};

#endif
  1268  
  1269  //! @} features2d_match
  1270  
  1271  /****************************************************************************************\
  1272  *                                   Drawing functions                                    *
  1273  \****************************************************************************************/
  1274  
  1275  //! @addtogroup features2d_draw
  1276  //! @{
  1277  
//! Flags controlling the behaviour of drawKeypoints and drawMatches.
enum struct DrawMatchesFlags
{
  DEFAULT = 0, //!< Output image matrix will be created (Mat::create),
               //!< i.e. existing memory of output image may be reused.
               //!< Two source images, matches and single keypoints will be drawn.
               //!< For each keypoint only the center point will be drawn (without
               //!< the circle around keypoint with keypoint size and orientation).
  DRAW_OVER_OUTIMG = 1, //!< Output image matrix will not be created (Mat::create).
                        //!< Matches will be drawn on existing content of output image.
  NOT_DRAW_SINGLE_POINTS = 2, //!< Single keypoints will not be drawn.
  DRAW_RICH_KEYPOINTS = 4 //!< For each keypoint the circle around keypoint with keypoint size and
                          //!< orientation will be drawn.
};
CV_ENUM_FLAGS(DrawMatchesFlags)  // enables bitwise combination of DrawMatchesFlags values
  1292  
/** @brief Draws keypoints.

@param image Source image.
@param keypoints Keypoints from the source image.
@param outImage Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param color Color of keypoints.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags. See details in drawMatches .

@note
For Python API, flags are modified as cv.DRAW_MATCHES_FLAGS_DEFAULT,
cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, cv.DRAW_MATCHES_FLAGS_DRAW_OVER_OUTIMG,
cv.DRAW_MATCHES_FLAGS_NOT_DRAW_SINGLE_POINTS
 */
CV_EXPORTS_W void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                               const Scalar& color=Scalar::all(-1), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
  1310  
/** @brief Draws the found matches of keypoints from two images.

@param img1 First source image.
@param keypoints1 Keypoints from the first source image.
@param img2 Second source image.
@param keypoints2 Keypoints from the second source image.
@param matches1to2 Matches from the first image to the second one, which means that keypoints1[i]
has a corresponding point in keypoints2[matches1to2[i].trainIdx] .
@param outImg Output image. Its content depends on the flags value defining what is drawn in the
output image. See possible flags bit values below.
@param matchColor Color of matches (lines and connected keypoints). If matchColor==Scalar::all(-1)
, the color is generated randomly.
@param singlePointColor Color of single keypoints (circles), which means that keypoints do not
have the matches. If singlePointColor==Scalar::all(-1) , the color is generated randomly.
@param matchesMask Mask determining which matches are drawn. If the mask is empty, all matches are
drawn.
@param flags Flags setting drawing features. Possible flags bit values are defined by
DrawMatchesFlags.

This function draws matches of keypoints from two images in the output image. Match is a line
connecting two keypoints (circles). See cv::DrawMatchesFlags.
 */
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<char>& matchesMask=std::vector<char>(), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
  1338  
/** @overload
@param matchesThickness Thickness of the lines drawn for the matches
(NOTE(review): inferred from the parameter name — confirm against the implementation).
All remaining parameters behave as in the primary drawMatches overload.
*/
CV_EXPORTS_W void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<DMatch>& matches1to2, InputOutputArray outImg,
                             const int matchesThickness, const Scalar& matchColor=Scalar::all(-1),
                             const Scalar& singlePointColor=Scalar::all(-1), const std::vector<char>& matchesMask=std::vector<char>(),
                             DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
  1346  
/** @overload
Variant accepting k-NN matches: matches1to2[i] holds all matches found for keypoints1[i],
and matchesMask follows the same nested layout. Exported to wrappers as drawMatchesKnn.
*/
CV_EXPORTS_AS(drawMatchesKnn) void drawMatches( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                             InputArray img2, const std::vector<KeyPoint>& keypoints2,
                             const std::vector<std::vector<DMatch> >& matches1to2, InputOutputArray outImg,
                             const Scalar& matchColor=Scalar::all(-1), const Scalar& singlePointColor=Scalar::all(-1),
                             const std::vector<std::vector<char> >& matchesMask=std::vector<std::vector<char> >(), DrawMatchesFlags flags=DrawMatchesFlags::DEFAULT );
  1352  
  1353  //! @} features2d_draw
  1354  
  1355  /****************************************************************************************\
  1356  *   Functions to evaluate the feature detectors and [generic] descriptor extractors      *
  1357  \****************************************************************************************/
  1358  
/** @brief Evaluates a feature detector by measuring keypoint repeatability between two views.
@param img1 First image.
@param img2 Second image, related to img1 by the homography H1to2.
@param H1to2 Homography mapping points of img1 to img2.
@param keypoints1 Keypoints of the first image (NOTE(review): presumably detected with fdetector
when not supplied — confirm against the implementation).
@param keypoints2 Keypoints of the second image (same note as keypoints1).
@param repeatability Output repeatability score.
@param correspCount Output number of correspondences found.
@param fdetector Detector used when keypoints need to be computed.
*/
CV_EXPORTS void evaluateFeatureDetector( const Mat& img1, const Mat& img2, const Mat& H1to2,
                                         std::vector<KeyPoint>* keypoints1, std::vector<KeyPoint>* keypoints2,
                                         float& repeatability, int& correspCount,
                                         const Ptr<FeatureDetector>& fdetector=Ptr<FeatureDetector>() );

/** @brief Computes a recall/precision curve from matches labelled correct/incorrect.
@param matches1to2 Matches for each query descriptor.
@param correctMatches1to2Mask Mask with the same nested layout as matches1to2 marking correct matches.
@param recallPrecisionCurve Output curve points (NOTE(review): coordinate convention is not
visible in this header — confirm against the implementation).
*/
CV_EXPORTS void computeRecallPrecisionCurve( const std::vector<std::vector<DMatch> >& matches1to2,
                                             const std::vector<std::vector<uchar> >& correctMatches1to2Mask,
                                             std::vector<Point2f>& recallPrecisionCurve );

//! Returns the recall on the curve corresponding to the precision level l_precision.
CV_EXPORTS float getRecall( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
//! Returns the index of the curve point nearest to the precision level l_precision.
CV_EXPORTS int getNearestPoint( const std::vector<Point2f>& recallPrecisionCurve, float l_precision );
  1370  
  1371  /****************************************************************************************\
  1372  *                                     Bag of visual words                                *
  1373  \****************************************************************************************/
  1374  
  1375  //! @addtogroup features2d_category
  1376  //! @{
  1377  
/** @brief Abstract base class for training the *bag of visual words* vocabulary from a set of descriptors.

For details, see, for example, *Visual Categorization with Bags of Keypoints* by Gabriella Csurka,
Christopher R. Dance, Lixin Fan, Jutta Willamowski, Cedric Bray, 2004. :
 */
class CV_EXPORTS_W BOWTrainer
{
public:
    BOWTrainer();
    virtual ~BOWTrainer();

    /** @brief Adds descriptors to a training set.

    @param descriptors Descriptors to add to a training set. Each row of the descriptors matrix is a
    descriptor.

    The training set is clustered using the cluster method to construct the vocabulary.
     */
    CV_WRAP void add( const Mat& descriptors );

    /** @brief Returns a training set of descriptors.
    */
    CV_WRAP const std::vector<Mat>& getDescriptors() const;

    /** @brief Returns the count of all descriptors stored in the training set.
    */
    CV_WRAP int descriptorsCount() const;

    //! Clears the training set accumulated via add().
    CV_WRAP virtual void clear();

    /** @overload */
    CV_WRAP virtual Mat cluster() const = 0;

    /** @brief Clusters train descriptors.

    @param descriptors Descriptors to cluster. Each row of the descriptors matrix is a descriptor.
    Descriptors are not added to the inner train descriptor set.

    The vocabulary consists of cluster centers. So, this method returns the vocabulary. In the first
    variant of the method, train descriptors stored in the object are clustered. In the second variant,
    input descriptors are clustered.
     */
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const = 0;

protected:
    std::vector<Mat> descriptors; //!< train descriptor matrices, one entry per add() call
    int size;                     //!< NOTE(review): presumably the cached total descriptor count
                                  //!< (cf. descriptorsCount()) — confirm against the implementation
};
  1426  
/** @brief kmeans -based class to train visual vocabulary using the *bag of visual words* approach. :
 */
class CV_EXPORTS_W BOWKMeansTrainer : public BOWTrainer
{
public:
    /** @brief The constructor.

    @param clusterCount Number of clusters, i.e. the size of the vocabulary to produce.
    @param termcrit Termination criteria for the kmeans iterations.
    @param attempts Number of times kmeans is run with different initial labellings.
    @param flags kmeans flags, e.g. KMEANS_PP_CENTERS.
    @see cv::kmeans
    */
    CV_WRAP BOWKMeansTrainer( int clusterCount, const TermCriteria& termcrit=TermCriteria(),
                      int attempts=3, int flags=KMEANS_PP_CENTERS );
    virtual ~BOWKMeansTrainer();

    // Returns trained vocabulary (i.e. cluster centers).
    CV_WRAP virtual Mat cluster() const CV_OVERRIDE;
    CV_WRAP virtual Mat cluster( const Mat& descriptors ) const CV_OVERRIDE;

protected:

    int clusterCount;      //!< number of clusters (vocabulary size) requested from kmeans
    TermCriteria termcrit; //!< kmeans termination criteria
    int attempts;          //!< number of kmeans restarts
    int flags;             //!< kmeans flags (e.g. KMEANS_PP_CENTERS)
};
  1451  
/** @brief Class to compute an image descriptor using the *bag of visual words*.

Such a computation consists of the following steps:

1.  Compute descriptors for a given image and its keypoints set.
2.  Find the nearest visual words from the vocabulary for each keypoint descriptor.
3.  Compute the bag-of-words image descriptor as a normalized histogram of vocabulary words
encountered in the image. The i-th bin of the histogram is a frequency of i-th word of the
vocabulary in the given image.
 */
class CV_EXPORTS_W BOWImgDescriptorExtractor
{
public:
    /** @brief The constructor.

    @param dextractor Descriptor extractor that is used to compute descriptors for an input image and
    its keypoints.
    @param dmatcher Descriptor matcher that is used to find the nearest word of the trained vocabulary
    for each keypoint descriptor of the image.
     */
    CV_WRAP BOWImgDescriptorExtractor( const Ptr<DescriptorExtractor>& dextractor,
                               const Ptr<DescriptorMatcher>& dmatcher );
    /** @overload
    Matcher-only variant: use the compute() overload that takes precomputed keypoint descriptors.
    */
    BOWImgDescriptorExtractor( const Ptr<DescriptorMatcher>& dmatcher );
    virtual ~BOWImgDescriptorExtractor();

    /** @brief Sets a visual vocabulary.

    @param vocabulary Vocabulary (can be trained using the inheritor of BOWTrainer ). Each row of the
    vocabulary is a visual word (cluster center).
     */
    CV_WRAP void setVocabulary( const Mat& vocabulary );

    /** @brief Returns the set vocabulary.
    */
    CV_WRAP const Mat& getVocabulary() const;

    /** @brief Computes an image descriptor using the set visual vocabulary.

    @param image Image, for which the descriptor is computed.
    @param keypoints Keypoints detected in the input image.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)
    returned if it is non-zero.
    @param descriptors Descriptors of the image keypoints that are returned if they are non-zero.
     */
    void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0, Mat* descriptors=0 );
    /** @overload
    @param keypointDescriptors Computed descriptors to match with vocabulary.
    @param imgDescriptor Computed output image descriptor.
    @param pointIdxsOfClusters Indices of keypoints that belong to the cluster. This means that
    pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster (word of vocabulary)
    returned if it is non-zero.
    */
    void compute( InputArray keypointDescriptors, OutputArray imgDescriptor,
                  std::vector<std::vector<int> >* pointIdxsOfClusters=0 );
    // compute() is not constant because DescriptorMatcher::match is not constant

    //! Wrapper-friendly variant of compute() exported to bindings under the name "compute".
    CV_WRAP_AS(compute) void compute2( const Mat& image, std::vector<KeyPoint>& keypoints, CV_OUT Mat& imgDescriptor )
    { compute(image,keypoints,imgDescriptor); }

    /** @brief Returns an image descriptor size if the vocabulary is set. Otherwise, it returns 0.
    */
    CV_WRAP int descriptorSize() const;

    /** @brief Returns an image descriptor type.
     */
    CV_WRAP int descriptorType() const;

protected:
    Mat vocabulary;                     //!< visual words, one per row (see setVocabulary())
    Ptr<DescriptorExtractor> dextractor; //!< extractor for image keypoints (may be empty in the matcher-only variant)
    Ptr<DescriptorMatcher> dmatcher;     //!< matcher used to assign descriptors to vocabulary words
};
  1528  
  1529  //! @} features2d_category
  1530  
  1531  //! @} features2d
  1532  
  1533  } /* namespace cv */
  1534  
  1535  #endif