github.com/kaydxh/golang@v0.0.131/pkg/gocv/cgo/third_path/opencv4/include/opencv2/objdetect.hpp

     1  /*M///////////////////////////////////////////////////////////////////////////////////////
     2  //
     3  //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
     4  //
     5  //  By downloading, copying, installing or using the software you agree to this license.
     6  //  If you do not agree to this license, do not download, install,
     7  //  copy or use the software.
     8  //
     9  //
    10  //                          License Agreement
    11  //                For Open Source Computer Vision Library
    12  //
    13  // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
    14  // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
    15  // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
    16  // Third party copyrights are property of their respective owners.
    17  //
    18  // Redistribution and use in source and binary forms, with or without modification,
    19  // are permitted provided that the following conditions are met:
    20  //
    21  //   * Redistribution's of source code must retain the above copyright notice,
    22  //     this list of conditions and the following disclaimer.
    23  //
    24  //   * Redistribution's in binary form must reproduce the above copyright notice,
    25  //     this list of conditions and the following disclaimer in the documentation
    26  //     and/or other materials provided with the distribution.
    27  //
    28  //   * The name of the copyright holders may not be used to endorse or promote products
    29  //     derived from this software without specific prior written permission.
    30  //
    31  // This software is provided by the copyright holders and contributors "as is" and
    32  // any express or implied warranties, including, but not limited to, the implied
    33  // warranties of merchantability and fitness for a particular purpose are disclaimed.
    34  // In no event shall the Intel Corporation or contributors be liable for any direct,
    35  // indirect, incidental, special, exemplary, or consequential damages
    36  // (including, but not limited to, procurement of substitute goods or services;
    37  // loss of use, data, or profits; or business interruption) however caused
    38  // and on any theory of liability, whether in contract, strict liability,
    39  // or tort (including negligence or otherwise) arising in any way out of
    40  // the use of this software, even if advised of the possibility of such damage.
    41  //
    42  //M*/
    43  
    44  #ifndef OPENCV_OBJDETECT_HPP
    45  #define OPENCV_OBJDETECT_HPP
    46  
    47  #include "opencv2/core.hpp"
    48  
    49  /**
    50  @defgroup objdetect Object Detection
    51  
    52  Haar Feature-based Cascade Classifier for Object Detection
    53  ----------------------------------------------------------
    54  
    55  The object detector described below has been initially proposed by Paul Viola @cite Viola01 and
    56  improved by Rainer Lienhart @cite Lienhart02 .
    57  
    58  First, a classifier (namely a *cascade of boosted classifiers working with haar-like features*) is
    59  trained with a few hundred sample views of a particular object (e.g., a face or a car), called
    60  positive examples, that are scaled to the same size (say, 20x20), and negative examples - arbitrary
    61  images of the same size.
    62  
    63  After a classifier is trained, it can be applied to a region of interest (of the same size as used
    64  during the training) in an input image. The classifier outputs a "1" if the region is likely to show
    65  the object (e.g., face/car), and "0" otherwise. To search for the object in the whole image one can
    66  move the search window across the image and check every location using the classifier. The
    67  classifier is designed so that it can be easily "resized" in order to be able to find the objects of
    68  interest at different sizes, which is more efficient than resizing the image itself. So, to find an
    69  object of an unknown size in the image the scan procedure should be done several times at different
    70  scales.
    71  
    72  The word "cascade" in the classifier name means that the resultant classifier consists of several
    73  simpler classifiers (*stages*) that are applied subsequently to a region of interest until at some
    74  stage the candidate is rejected or all the stages are passed. The word "boosted" means that the
    75  classifiers at every stage of the cascade are complex themselves and they are built out of basic
    76  classifiers using one of four different boosting techniques (weighted voting). Currently Discrete
    77  Adaboost, Real Adaboost, Gentle Adaboost and Logitboost are supported. The basic classifiers are
    78  decision-tree classifiers with at least 2 leaves. Haar-like features are the input to the basic
    79  classifiers, and are calculated as described below. The current algorithm uses the following
    80  Haar-like features:
    81  
    82  ![image](pics/haarfeatures.png)
    83  
    84  The feature used in a particular classifier is specified by its shape (1a, 2b etc.), position within
    85  the region of interest and the scale (this scale is not the same as the scale used at the detection
    86  stage, though these two scales are multiplied). For example, in the case of the third line feature
    87  (2c) the response is calculated as the difference between the sum of image pixels under the
    88  rectangle covering the whole feature (including the two white stripes and the black stripe in the
    89  middle) and the sum of the image pixels under the black stripe multiplied by 3 in order to
    90  compensate for the differences in the size of areas. The sums of pixel values over rectangular
    91  regions are calculated rapidly using integral images (see below and the integral description).
    92  
    93  To see the object detector at work, have a look at the facedetect demo:
    94  <https://github.com/opencv/opencv/tree/master/samples/cpp/dbt_face_detection.cpp>
    95  
    96  The following reference is for the detection part only. There is a separate application called
    97  opencv_traincascade that can train a cascade of boosted classifiers from a set of samples.
    98  
    99  @note In the new C++ interface it is also possible to use LBP (local binary pattern) features in
   100  addition to Haar-like features.
   101  [Viola01] Paul Viola and Michael J. Jones. Rapid Object Detection using a Boosted Cascade of Simple Features. IEEE CVPR, 2001. The paper is available online at
   102  <http://research.microsoft.com/en-us/um/people/viola/Pubs/Detect/violaJones_CVPR2001.pdf>
   103  
   104  @{
   105      @defgroup objdetect_c C API
   106  @}
   107   */
   108  
   109  typedef struct CvHaarClassifierCascade CvHaarClassifierCascade;
   110  
   111  namespace cv
   112  {
   113  
   114  //! @addtogroup objdetect
   115  //! @{
   116  
   117  ///////////////////////////// Object Detection ////////////////////////////
   118  
   119  //! class for grouping object candidates, detected by Cascade Classifier, HOG etc.
   120  //! instance of the class is to be passed to cv::partition (see opencv2/core/operations.hpp)
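//!
//! A possible usage sketch (here `candidates` is assumed to already hold raw detections):
//! @code
//!     std::vector<cv::Rect> candidates; // e.g. rectangles collected from several detector runs
//!     std::vector<int> labels;
//!     int nClusters = cv::partition(candidates, labels, cv::SimilarRects(0.2));
//! @endcode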
   121  class CV_EXPORTS SimilarRects
   122  {
   123  public:
   124      SimilarRects(double _eps) : eps(_eps) {}
   125      inline bool operator()(const Rect& r1, const Rect& r2) const
   126      {
   127          double delta = eps * ((std::min)(r1.width, r2.width) + (std::min)(r1.height, r2.height)) * 0.5;
   128          return std::abs(r1.x - r2.x) <= delta &&
   129              std::abs(r1.y - r2.y) <= delta &&
   130              std::abs(r1.x + r1.width - r2.x - r2.width) <= delta &&
   131              std::abs(r1.y + r1.height - r2.y - r2.height) <= delta;
   132      }
   133      double eps;
   134  };
   135  
   136  /** @brief Groups the object candidate rectangles.
   137  
   138  @param rectList Input/output vector of rectangles. Output vector includes retained and grouped
   139  rectangles. (The Python list is not modified in place.)
   140  @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a
   141  group of rectangles to retain it.
   142  @param eps Relative difference between sides of the rectangles to merge them into a group.
   143  
   144  The function is a wrapper for the generic function partition . It clusters all the input rectangles
   145  using the rectangle equivalence criteria that combines rectangles with similar sizes and similar
   146  locations. The similarity is defined by eps. When eps=0 , no clustering is done at all. If
   147  \f$\texttt{eps}\rightarrow +\infty\f$, all the rectangles are put in one cluster. Then, the small
   148  clusters containing less than or equal to groupThreshold rectangles are rejected. In each other
   149  cluster, the average rectangle is computed and put into the output rectangle list.
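
A short sketch (the rectangle values below are purely illustrative):
@code
    std::vector<cv::Rect> boxes = { {10, 10, 40, 40}, {12, 11, 40, 40}, {200, 50, 30, 30} };
    cv::groupRectangles(boxes, 1, 0.2); // clusters with at most 1 rectangle are dropped
    // boxes now holds one averaged rectangle for the two overlapping candidates
@endcode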
   150   */
   151  CV_EXPORTS   void groupRectangles(std::vector<Rect>& rectList, int groupThreshold, double eps = 0.2);
   152  /** @overload */
   153  CV_EXPORTS_W void groupRectangles(CV_IN_OUT std::vector<Rect>& rectList, CV_OUT std::vector<int>& weights,
   154                                    int groupThreshold, double eps = 0.2);
   155  /** @overload */
   156  CV_EXPORTS   void groupRectangles(std::vector<Rect>& rectList, int groupThreshold,
   157                                    double eps, std::vector<int>* weights, std::vector<double>* levelWeights );
   158  /** @overload */
   159  CV_EXPORTS   void groupRectangles(std::vector<Rect>& rectList, std::vector<int>& rejectLevels,
   160                                    std::vector<double>& levelWeights, int groupThreshold, double eps = 0.2);
   161  /** @overload */
   162  CV_EXPORTS   void groupRectangles_meanshift(std::vector<Rect>& rectList, std::vector<double>& foundWeights,
   163                                              std::vector<double>& foundScales,
   164                                              double detectThreshold = 0.0, Size winDetSize = Size(64, 128));
   165  
   166  template<> struct DefaultDeleter<CvHaarClassifierCascade>{ CV_EXPORTS void operator ()(CvHaarClassifierCascade* obj) const; };
   167  
   168  enum { CASCADE_DO_CANNY_PRUNING    = 1,
   169         CASCADE_SCALE_IMAGE         = 2,
   170         CASCADE_FIND_BIGGEST_OBJECT = 4,
   171         CASCADE_DO_ROUGH_SEARCH     = 8
   172       };
   173  
   174  class CV_EXPORTS_W BaseCascadeClassifier : public Algorithm
   175  {
   176  public:
   177      virtual ~BaseCascadeClassifier();
   178      virtual bool empty() const CV_OVERRIDE = 0;
   179      virtual bool load( const String& filename ) = 0;
   180      virtual void detectMultiScale( InputArray image,
   181                             CV_OUT std::vector<Rect>& objects,
   182                             double scaleFactor,
   183                             int minNeighbors, int flags,
   184                             Size minSize, Size maxSize ) = 0;
   185  
   186      virtual void detectMultiScale( InputArray image,
   187                             CV_OUT std::vector<Rect>& objects,
   188                             CV_OUT std::vector<int>& numDetections,
   189                             double scaleFactor,
   190                             int minNeighbors, int flags,
   191                             Size minSize, Size maxSize ) = 0;
   192  
   193      virtual void detectMultiScale( InputArray image,
   194                                     CV_OUT std::vector<Rect>& objects,
   195                                     CV_OUT std::vector<int>& rejectLevels,
   196                                     CV_OUT std::vector<double>& levelWeights,
   197                                     double scaleFactor,
   198                                     int minNeighbors, int flags,
   199                                     Size minSize, Size maxSize,
   200                                     bool outputRejectLevels ) = 0;
   201  
   202      virtual bool isOldFormatCascade() const = 0;
   203      virtual Size getOriginalWindowSize() const = 0;
   204      virtual int getFeatureType() const = 0;
   205      virtual void* getOldCascade() = 0;
   206  
   207      class CV_EXPORTS MaskGenerator
   208      {
   209      public:
   210          virtual ~MaskGenerator() {}
   211          virtual Mat generateMask(const Mat& src)=0;
   212          virtual void initializeMask(const Mat& /*src*/) { }
   213      };
   214      virtual void setMaskGenerator(const Ptr<MaskGenerator>& maskGenerator) = 0;
   215      virtual Ptr<MaskGenerator> getMaskGenerator() = 0;
   216  };
   217  
   218  /** @example samples/cpp/facedetect.cpp
   219  This program demonstrates usage of the Cascade classifier class
   220  \image html Cascade_Classifier_Tutorial_Result_Haar.jpg "Sample screenshot" width=321 height=254
   221  */
   222  /** @brief Cascade classifier class for object detection.
   223   */
   224  class CV_EXPORTS_W CascadeClassifier
   225  {
   226  public:
   227      CV_WRAP CascadeClassifier();
   228      /** @brief Loads a classifier from a file.
   229  
   230      @param filename Name of the file from which the classifier is loaded.
   231       */
   232      CV_WRAP CascadeClassifier(const String& filename);
   233      ~CascadeClassifier();
   234      /** @brief Checks whether the classifier has been loaded.
   235      */
   236      CV_WRAP bool empty() const;
   237      /** @brief Loads a classifier from a file.
   238  
   239      @param filename Name of the file from which the classifier is loaded. The file may contain an old
   240      HAAR classifier trained by the haartraining application or a new cascade classifier trained by the
   241      traincascade application.
   242       */
   243      CV_WRAP bool load( const String& filename );
   244      /** @brief Reads a classifier from a FileStorage node.
   245  
   246      @note The file may contain a new cascade classifier (trained by the traincascade application) only.
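
    A minimal sketch of reading a cascade stored via FileStorage (the file name is a placeholder):
    @code
        cv::FileStorage fs("cascade.xml", cv::FileStorage::READ);
        cv::CascadeClassifier classifier;
        bool ok = fs.isOpened() && classifier.read(fs.getFirstTopLevelNode());
    @endcode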
   247       */
   248      CV_WRAP bool read( const FileNode& node );
   249  
   250      /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
   251      of rectangles.
   252  
   253      @param image Matrix of the type CV_8U containing an image where objects are detected.
   254      @param objects Vector of rectangles where each rectangle contains the detected object, the
   255      rectangles may be partially outside the original image.
   256      @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
   257      @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
   258      to retain it.
   259      @param flags Parameter with the same meaning for an old cascade as in the function
   260      cvHaarDetectObjects. It is not used for a new cascade.
   261      @param minSize Minimum possible object size. Objects smaller than that are ignored.
   262      @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize`, the model is evaluated on a single scale.
   263  
   264      The function is parallelized with the TBB library.
   265  
   266      @note
   267         -   (Python) A face detection example using cascade classifiers can be found at
   268              opencv_source_code/samples/python/facedetect.py
   269      */
   270      CV_WRAP void detectMultiScale( InputArray image,
   271                            CV_OUT std::vector<Rect>& objects,
   272                            double scaleFactor = 1.1,
   273                            int minNeighbors = 3, int flags = 0,
   274                            Size minSize = Size(),
   275                            Size maxSize = Size() );
   276  
   277      /** @overload
   278      @param image Matrix of the type CV_8U containing an image where objects are detected.
   279      @param objects Vector of rectangles where each rectangle contains the detected object, the
   280      rectangles may be partially outside the original image.
   281      @param numDetections Vector of detection numbers for the corresponding objects. An object's number
   282      of detections is the number of neighboring positively classified rectangles that were joined
   283      together to form the object.
   284      @param scaleFactor Parameter specifying how much the image size is reduced at each image scale.
   285      @param minNeighbors Parameter specifying how many neighbors each candidate rectangle should have
   286      to retain it.
   287      @param flags Parameter with the same meaning for an old cascade as in the function
   288      cvHaarDetectObjects. It is not used for a new cascade.
   289      @param minSize Minimum possible object size. Objects smaller than that are ignored.
   290      @param maxSize Maximum possible object size. Objects larger than that are ignored. If `maxSize == minSize`, the model is evaluated on a single scale.
   291      */
   292      CV_WRAP_AS(detectMultiScale2) void detectMultiScale( InputArray image,
   293                            CV_OUT std::vector<Rect>& objects,
   294                            CV_OUT std::vector<int>& numDetections,
   295                            double scaleFactor=1.1,
   296                            int minNeighbors=3, int flags=0,
   297                            Size minSize=Size(),
   298                            Size maxSize=Size() );
   299  
   300      /** @overload
   301      This function allows you to retrieve the final stage decision certainty of classification.
   302      For this, one needs to set `outputRejectLevels` to true and provide the `rejectLevels` and `levelWeights` parameters.
   303      For each resulting detection, `levelWeights` will then contain the certainty of classification at the final stage.
   304      This value can then be used to separate strong from weaker classifications.
   305  
   306      A code sample on how to use it efficiently can be found below:
   307      @code
   308      Mat img;
   309      vector<double> weights;
   310      vector<int> levels;
   311      vector<Rect> detections;
   312      CascadeClassifier model("/path/to/your/model.xml");
   313      model.detectMultiScale(img, detections, levels, weights, 1.1, 3, 0, Size(), Size(), true);
   314      if (!detections.empty()) cerr << "Detection " << detections[0] << " with weight " << weights[0] << endl;
   315      @endcode
   316      */
   317      CV_WRAP_AS(detectMultiScale3) void detectMultiScale( InputArray image,
   318                                    CV_OUT std::vector<Rect>& objects,
   319                                    CV_OUT std::vector<int>& rejectLevels,
   320                                    CV_OUT std::vector<double>& levelWeights,
   321                                    double scaleFactor = 1.1,
   322                                    int minNeighbors = 3, int flags = 0,
   323                                    Size minSize = Size(),
   324                                    Size maxSize = Size(),
   325                                    bool outputRejectLevels = false );
   326  
   327      CV_WRAP bool isOldFormatCascade() const;
   328      CV_WRAP Size getOriginalWindowSize() const;
   329      CV_WRAP int getFeatureType() const;
   330      void* getOldCascade();
   331  
   332      CV_WRAP static bool convert(const String& oldcascade, const String& newcascade);
   333  
   334      void setMaskGenerator(const Ptr<BaseCascadeClassifier::MaskGenerator>& maskGenerator);
   335      Ptr<BaseCascadeClassifier::MaskGenerator> getMaskGenerator();
   336  
   337      Ptr<BaseCascadeClassifier> cc;
   338  };
   339  
   340  CV_EXPORTS Ptr<BaseCascadeClassifier::MaskGenerator> createFaceDetectionMaskGenerator();
   341  
   342  //////////////// HOG (Histogram-of-Oriented-Gradients) Descriptor and Object Detector //////////////
   343  
   344  //! struct for detection region of interest (ROI)
   345  struct DetectionROI
   346  {
   347     //! scale(size) of the bounding box
   348     double scale;
   349     //! set of requested locations to be evaluated
   350     std::vector<cv::Point> locations;
   351     //! vector that will contain confidence values for each location
   352     std::vector<double> confidences;
   353  };
   354  
   355  /**@brief Implementation of HOG (Histogram of Oriented Gradients) descriptor and object detector.
   356  
   357  The HOG descriptor algorithm was introduced by Navneet Dalal and Bill Triggs @cite Dalal2005 .
   358  
   359  Useful links:
   360  
   361  https://hal.inria.fr/inria-00548512/document/
   362  
   363  https://en.wikipedia.org/wiki/Histogram_of_oriented_gradients
   364  
   365  https://software.intel.com/en-us/ipp-dev-reference-histogram-of-oriented-gradients-hog-descriptor
   366  
   367  http://www.learnopencv.com/histogram-of-oriented-gradients
   368  
   369  http://www.learnopencv.com/handwritten-digits-classification-an-opencv-c-python-tutorial
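
A minimal people-detection sketch (the input image is assumed to be provided by the caller):
@code
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());
    cv::Mat img;                     // 8-bit input image, filled by the caller
    std::vector<cv::Rect> people;
    std::vector<double> weights;
    hog.detectMultiScale(img, people, weights);
@endcode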
   370  
   371   */
   372  struct CV_EXPORTS_W HOGDescriptor
   373  {
   374  public:
   375      enum HistogramNormType { L2Hys = 0 //!< Default histogramNormType
   376           };
   377      enum { DEFAULT_NLEVELS = 64 //!< Default nlevels value.
   378           };
   379      enum DescriptorStorageFormat { DESCR_FORMAT_COL_BY_COL, DESCR_FORMAT_ROW_BY_ROW };
   380  
   381      /**@brief Creates the HOG descriptor and detector with default params.
   382  
   383      equal to HOGDescriptor(Size(64,128), Size(16,16), Size(8,8), Size(8,8), 9 )
   384      */
   385      CV_WRAP HOGDescriptor() : winSize(64,128), blockSize(16,16), blockStride(8,8),
   386          cellSize(8,8), nbins(9), derivAperture(1), winSigma(-1),
   387          histogramNormType(HOGDescriptor::L2Hys), L2HysThreshold(0.2), gammaCorrection(true),
   388          free_coef(-1.f), nlevels(HOGDescriptor::DEFAULT_NLEVELS), signedGradient(false)
   389      {}
   390  
   391      /** @overload
   392      @param _winSize sets winSize with given value.
   393      @param _blockSize sets blockSize with given value.
   394      @param _blockStride sets blockStride with given value.
   395      @param _cellSize sets cellSize with given value.
   396      @param _nbins sets nbins with given value.
   397      @param _derivAperture sets derivAperture with given value.
   398      @param _winSigma sets winSigma with given value.
   399      @param _histogramNormType sets histogramNormType with given value.
   400      @param _L2HysThreshold sets L2HysThreshold with given value.
   401      @param _gammaCorrection sets gammaCorrection with given value.
   402      @param _nlevels sets nlevels with given value.
   403      @param _signedGradient sets signedGradient with given value.
   404      */
   405      CV_WRAP HOGDescriptor(Size _winSize, Size _blockSize, Size _blockStride,
   406                    Size _cellSize, int _nbins, int _derivAperture=1, double _winSigma=-1,
   407                    HOGDescriptor::HistogramNormType _histogramNormType=HOGDescriptor::L2Hys,
   408                    double _L2HysThreshold=0.2, bool _gammaCorrection=false,
   409                    int _nlevels=HOGDescriptor::DEFAULT_NLEVELS, bool _signedGradient=false)
   410      : winSize(_winSize), blockSize(_blockSize), blockStride(_blockStride), cellSize(_cellSize),
   411      nbins(_nbins), derivAperture(_derivAperture), winSigma(_winSigma),
   412      histogramNormType(_histogramNormType), L2HysThreshold(_L2HysThreshold),
   413      gammaCorrection(_gammaCorrection), free_coef(-1.f), nlevels(_nlevels), signedGradient(_signedGradient)
   414      {}
   415  
   416      /** @overload
   417      @param filename The file name containing HOGDescriptor properties and coefficients for the linear SVM classifier.
   418      */
   419      CV_WRAP HOGDescriptor(const String& filename)
   420      {
   421          load(filename);
   422      }
   423  
   424      /** @overload
   425      @param d the HOGDescriptor which is cloned to create a new one.
   426      */
   427      HOGDescriptor(const HOGDescriptor& d)
   428      {
   429          d.copyTo(*this);
   430      }
   431  
   432      /**@brief Default destructor.
   433      */
   434      virtual ~HOGDescriptor() {}
   435  
   436      /**@brief Returns the number of coefficients required for the classification.
   437      */
   438      CV_WRAP size_t getDescriptorSize() const;
   439  
   440      /** @brief Checks if the detector size is equal to the descriptor size.
   441      */
   442      CV_WRAP bool checkDetectorSize() const;
   443  
   444      /** @brief Returns winSigma value
   445      */
   446      CV_WRAP double getWinSigma() const;
   447  
   448      /**@example samples/cpp/peopledetect.cpp
   449      */
   450      /**@brief Sets coefficients for the linear SVM classifier.
   451      @param svmdetector coefficients for the linear SVM classifier.
   452      */
   453      CV_WRAP virtual void setSVMDetector(InputArray svmdetector);
   454  
   455      /** @brief Reads HOGDescriptor parameters from a cv::FileNode.
   456      @param fn File node
   457      */
   458      virtual bool read(FileNode& fn);
   459  
   460      /** @brief Stores HOGDescriptor parameters in a cv::FileStorage.
   461      @param fs File storage
   462      @param objname Object name
   463      */
   464      virtual void write(FileStorage& fs, const String& objname) const;
   465  
   466      /** @brief loads HOGDescriptor parameters and coefficients for the linear SVM classifier from a file.
   467      @param filename Path of the file to read.
   468      @param objname The optional name of the node to read (if empty, the first top-level node will be used).
   469      */
   470      CV_WRAP virtual bool load(const String& filename, const String& objname = String());
   471  
   472      /** @brief saves HOGDescriptor parameters and coefficients for the linear SVM classifier to a file
   473      @param filename File name
   474      @param objname Object name
   475      */
   476      CV_WRAP virtual void save(const String& filename, const String& objname = String()) const;
   477  
   478      /** @brief clones the HOGDescriptor
   479      @param c cloned HOGDescriptor
   480      */
   481      virtual void copyTo(HOGDescriptor& c) const;
   482  
   483      /**@example samples/cpp/train_HOG.cpp
   484      */
   485      /** @brief Computes HOG descriptors of given image.
   486      @param img Matrix of the type CV_8U containing an image where HOG features will be calculated.
   487      @param descriptors Matrix of the type CV_32F
   488      @param winStride Window stride. It must be a multiple of block stride.
   489      @param padding Padding
   490      @param locations Vector of Point
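
    A minimal sketch (the patch is assumed to already match `winSize`, 64x128 by default):
    @code
        cv::HOGDescriptor hog;
        cv::Mat window;                   // 8-bit 64x128 patch prepared by the caller
        std::vector<float> descriptors;
        hog.compute(window, descriptors); // descriptors.size() == hog.getDescriptorSize()
    @endcode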
   491      */
   492      CV_WRAP virtual void compute(InputArray img,
   493                           CV_OUT std::vector<float>& descriptors,
   494                           Size winStride = Size(), Size padding = Size(),
   495                           const std::vector<Point>& locations = std::vector<Point>()) const;
   496  
   497      /** @brief Performs object detection without a multi-scale window.
   498      @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
   499      @param foundLocations Vector of points where each point is the top-left corner of a detected object's boundaries.
   500      @param weights Vector that will contain confidence values for each detected object.
   501      @param hitThreshold Threshold for the distance between features and SVM classifying plane.
   502      Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
   503      But if the free coefficient is omitted (which is allowed), you can specify it manually here.
   504      @param winStride Window stride. It must be a multiple of block stride.
   505      @param padding Padding
   506      @param searchLocations Vector of Point that includes the set of requested locations to be evaluated.
   507      */
   508      CV_WRAP virtual void detect(InputArray img, CV_OUT std::vector<Point>& foundLocations,
   509                          CV_OUT std::vector<double>& weights,
   510                          double hitThreshold = 0, Size winStride = Size(),
   511                          Size padding = Size(),
   512                          const std::vector<Point>& searchLocations = std::vector<Point>()) const;
   513  
   514      /** @brief Performs object detection without a multi-scale window.
   515      @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
   516      @param foundLocations Vector of points where each point is the top-left corner of a detected object's boundaries.
   517      @param hitThreshold Threshold for the distance between features and SVM classifying plane.
   518      Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
   519      But if the free coefficient is omitted (which is allowed), you can specify it manually here.
   520      @param winStride Window stride. It must be a multiple of block stride.
   521      @param padding Padding
   522      @param searchLocations Vector of Point that includes the locations to search.
   523      */
   524      virtual void detect(InputArray img, CV_OUT std::vector<Point>& foundLocations,
   525                          double hitThreshold = 0, Size winStride = Size(),
   526                          Size padding = Size(),
   527                          const std::vector<Point>& searchLocations=std::vector<Point>()) const;
   528  
   529      /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
   530      of rectangles.
   531      @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
   532      @param foundLocations Vector of rectangles where each rectangle contains the detected object.
   533      @param foundWeights Vector that will contain confidence values for each detected object.
   534      @param hitThreshold Threshold for the distance between features and SVM classifying plane.
   535      Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
   536      But if the free coefficient is omitted (which is allowed), you can specify it manually here.
   537      @param winStride Window stride. It must be a multiple of block stride.
   538      @param padding Padding
   539      @param scale Coefficient of the detection window increase.
   540      @param finalThreshold Final threshold
   541      @param useMeanshiftGrouping indicates whether meanshift grouping should be used instead of the default rectangle grouping.
   542      */
   543      CV_WRAP virtual void detectMultiScale(InputArray img, CV_OUT std::vector<Rect>& foundLocations,
   544                                    CV_OUT std::vector<double>& foundWeights, double hitThreshold = 0,
   545                                    Size winStride = Size(), Size padding = Size(), double scale = 1.05,
   546                                    double finalThreshold = 2.0,bool useMeanshiftGrouping = false) const;
   547  
   548      /** @brief Detects objects of different sizes in the input image. The detected objects are returned as a list
   549      of rectangles.
   550      @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
   551      @param foundLocations Vector of rectangles where each rectangle contains the detected object.
   552      @param hitThreshold Threshold for the distance between features and SVM classifying plane.
   553      Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient).
   554      But if the free coefficient is omitted (which is allowed), you can specify it manually here.
   555      @param winStride Window stride. It must be a multiple of block stride.
   556      @param padding Padding
   557      @param scale Coefficient of the detection window increase.
   558      @param finalThreshold Final threshold
   559      @param useMeanshiftGrouping indicates whether meanshift grouping should be used instead of the default rectangle grouping.
   560      */
   561      virtual void detectMultiScale(InputArray img, CV_OUT std::vector<Rect>& foundLocations,
   562                                    double hitThreshold = 0, Size winStride = Size(),
   563                                    Size padding = Size(), double scale = 1.05,
   564                                    double finalThreshold = 2.0, bool useMeanshiftGrouping = false) const;
   565  
   566      /** @brief  Computes gradients and quantized gradient orientations.
   567      @param img Matrix containing the image to be computed
   568      @param grad Matrix of type CV_32FC2 containing computed gradients
   569      @param angleOfs Matrix of type CV_8UC2 containing quantized gradient orientations
   570      @param paddingTL Padding from top-left
   571      @param paddingBR Padding from bottom-right
   572      */
   573      CV_WRAP virtual void computeGradient(InputArray img, InputOutputArray grad, InputOutputArray angleOfs,
   574                                   Size paddingTL = Size(), Size paddingBR = Size()) const;
   575  
   576      /** @brief Returns coefficients of the classifier trained for people detection (for 64x128 windows).
   577      */
   578      CV_WRAP static std::vector<float> getDefaultPeopleDetector();
   579  
   580      /**@example samples/tapi/hog.cpp
   581      */
   582      /** @brief Returns coefficients of the classifier trained for people detection (for 48x96 windows).
   583      */
   584      CV_WRAP static std::vector<float> getDaimlerPeopleDetector();
   585  
   586      //! Detection window size. Align to block size and block stride. Default value is Size(64,128).
   587      CV_PROP Size winSize;
   588  
   589      //! Block size in pixels. Align to cell size. Default value is Size(16,16).
   590      CV_PROP Size blockSize;
   591  
   592      //! Block stride. It must be a multiple of cell size. Default value is Size(8,8).
   593      CV_PROP Size blockStride;
   594  
   595      //! Cell size. Default value is Size(8,8).
   596      CV_PROP Size cellSize;
   597  
   598      //! Number of bins used in the calculation of histogram of gradients. Default value is 9.
   599      CV_PROP int nbins;
   600  
   601      //! not documented
   602      CV_PROP int derivAperture;
   603  
   604      //! Gaussian smoothing window parameter.
   605      CV_PROP double winSigma;
   606  
   607      //! histogramNormType
   608      CV_PROP HOGDescriptor::HistogramNormType histogramNormType;
   609  
   610      //! L2-Hys normalization method shrinkage.
   611      CV_PROP double L2HysThreshold;
   612  
   613      //! Flag to specify whether the gamma correction preprocessing is required or not.
   614      CV_PROP bool gammaCorrection;
   615  
   616      //! coefficients for the linear SVM classifier.
   617      CV_PROP std::vector<float> svmDetector;
   618  
   619      //! coefficients for the linear SVM classifier used when OpenCL is enabled
   620      UMat oclSvmDetector;
   621  
   622      //! not documented
   623      float free_coef;
   624  
   625      //! Maximum number of detection window increases. Default value is 64
   626      CV_PROP int nlevels;
   627  
   628      //! Indicates whether signed gradient will be used or not
   629      CV_PROP bool signedGradient;
   630  
   631      /** @brief evaluate specified ROI and return confidence value for each location
   632      @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
   633      @param locations Vector of Point
   634      @param foundLocations Vector of Point where each Point is detected object's top-left point.
   635      @param confidences confidences
   636      @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually
   637      it is 0 and should be specified in the detector coefficients (as the last free coefficient). But if
   638      the free coefficient is omitted (which is allowed), you can specify it manually here
   639      @param winStride winStride
   640      @param padding padding
   641      */
   642      virtual void detectROI(InputArray img, const std::vector<cv::Point> &locations,
   643                                     CV_OUT std::vector<cv::Point>& foundLocations, CV_OUT std::vector<double>& confidences,
   644                                     double hitThreshold = 0, cv::Size winStride = Size(),
   645                                     cv::Size padding = Size()) const;
   646  
   647      /** @brief evaluate specified ROI and return confidence value for each location in multiple scales
   648      @param img Matrix of the type CV_8U or CV_8UC3 containing an image where objects are detected.
   649      @param foundLocations Vector of rectangles where each rectangle contains the detected object.
   650      @param locations Vector of DetectionROI
   651      @param hitThreshold Threshold for the distance between features and SVM classifying plane. Usually it is 0 and should be specified
   652      in the detector coefficients (as the last free coefficient). But if the free coefficient is omitted (which is allowed), you can specify it manually here.
   653      @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
   654      */
   655      virtual void detectMultiScaleROI(InputArray img,
   656                                       CV_OUT std::vector<cv::Rect>& foundLocations,
   657                                       std::vector<DetectionROI>& locations,
   658                                       double hitThreshold = 0,
   659                                       int groupThreshold = 0) const;
   660  
   661      /** @brief Groups the object candidate rectangles.
   662      @param rectList  Input/output vector of rectangles. Output vector includes retained and grouped rectangles. (The Python list is not modified in place.)
   663      @param weights Input/output vector of weights of rectangles. Output vector includes weights of retained and grouped rectangles. (The Python list is not modified in place.)
   664      @param groupThreshold Minimum possible number of rectangles minus 1. The threshold is used in a group of rectangles to retain it.
   665      @param eps Relative difference between sides of the rectangles to merge them into a group.
   666      */
   667      void groupRectangles(std::vector<cv::Rect>& rectList, std::vector<double>& weights, int groupThreshold, double eps) const;
   668  };
   669  
   670  class CV_EXPORTS_W QRCodeDetector
   671  {
   672  public:
   673      CV_WRAP QRCodeDetector();
   674      ~QRCodeDetector();
   675  
   676      /** @brief sets the epsilon used during the horizontal scan of QR code stop marker detection.
   677       @param epsX Epsilon neighborhood, which allows you to determine the horizontal pattern
   678       of the scheme 1:1:3:1:1 according to the QR code standard.
   679      */
   680      CV_WRAP void setEpsX(double epsX);
   681      /** @brief sets the epsilon used during the vertical scan of QR code stop marker detection.
   682       @param epsY Epsilon neighborhood, which allows you to determine the vertical pattern
   683       of the scheme 1:1:3:1:1 according to the QR code standard.
   684       */
   685      CV_WRAP void setEpsY(double epsY);
   686  
   687      /** @brief Detects QR code in image and returns the quadrangle containing the code.
   688       @param img grayscale or color (BGR) image containing (or not) QR code.
   689       @param points Output vector of vertices of the minimum-area quadrangle containing the code.
   690       */
   691      CV_WRAP bool detect(InputArray img, OutputArray points) const;
   692  
   693      /** @brief Decodes QR code in image once it's found by the detect() method.
   694  
   695       Returns UTF8-encoded output string or empty string if the code cannot be decoded.
   696       @param img grayscale or color (BGR) image containing QR code.
   697       @param points Quadrangle vertices found by detect() method (or some other algorithm).
   698       @param straight_qrcode The optional output image containing rectified and binarized QR code
   699       */
   700      CV_WRAP std::string decode(InputArray img, InputArray points, OutputArray straight_qrcode = noArray());
   701  
   702      /** @brief Decodes QR code on a curved surface in image once it's found by the detect() method.
   703  
   704       Returns UTF8-encoded output string or empty string if the code cannot be decoded.
   705       @param img grayscale or color (BGR) image containing QR code.
   706       @param points Quadrangle vertices found by detect() method (or some other algorithm).
   707       @param straight_qrcode The optional output image containing rectified and binarized QR code
   708       */
   709      CV_WRAP cv::String decodeCurved(InputArray img, InputArray points, OutputArray straight_qrcode = noArray());
   710  
   711      /** @brief Both detects and decodes QR code
   712  
   713       @param img grayscale or color (BGR) image containing QR code.
   714       @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found.
   715       @param straight_qrcode The optional output image containing rectified and binarized QR code
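
     A minimal sketch (`frame` is assumed to be an image that may contain a QR code):
     @code
         cv::QRCodeDetector qr;
         cv::Mat corners;                        // 4 vertices of the code, if found
         std::string text = qr.detectAndDecode(frame, corners);
         if (!text.empty())
             std::cout << text << std::endl;     // decoded payload
     @endcode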
   716       */
   717      CV_WRAP std::string detectAndDecode(InputArray img, OutputArray points=noArray(),
   718                                          OutputArray straight_qrcode = noArray());
   719  
   720      /** @brief Both detects and decodes QR code on a curved surface
   721  
   722       @param img grayscale or color (BGR) image containing QR code.
   723       @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found.
   724       @param straight_qrcode The optional output image containing rectified and binarized QR code
   725       */
   726      CV_WRAP std::string detectAndDecodeCurved(InputArray img, OutputArray points=noArray(),
   727                                                OutputArray straight_qrcode = noArray());
   728  
   729      /** @brief Detects QR codes in image and returns the vector of the quadrangles containing the codes.
   730       @param img grayscale or color (BGR) image containing (or not) QR codes.
   731       @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes.
   732       */
   733      CV_WRAP
   734      bool detectMulti(InputArray img, OutputArray points) const;
   735  
   736      /** @brief Decodes QR codes in image once they are found by the detectMulti() method.
   737       @param img grayscale or color (BGR) image containing QR codes.
   738       @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded.
   739       @param points vector of Quadrangle vertices found by detect() method (or some other algorithm).
   740       @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes
   741       */
   742      CV_WRAP
   743      bool decodeMulti(
   744              InputArray img, InputArray points,
   745              CV_OUT std::vector<std::string>& decoded_info,
   746              OutputArrayOfArrays straight_qrcode = noArray()
   747      ) const;
   748  
   749      /** @brief Both detects and decodes QR codes
   750      @param img grayscale or color (BGR) image containing QR codes.
   751      @param decoded_info UTF8-encoded output vector of string or empty vector of string if the codes cannot be decoded.
   752      @param points optional output vector of vertices of the found QR code quadrangles. Will be empty if not found.
   753      @param straight_qrcode The optional output vector of images containing rectified and binarized QR codes
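
    A minimal sketch (`frame` is assumed to be an image that may contain several QR codes):
    @code
        cv::QRCodeDetector qr;
        std::vector<std::string> decoded;
        cv::Mat corners;                          // 4 vertices per detected code
        if (qr.detectAndDecodeMulti(frame, decoded, corners))
            for (const std::string& s : decoded)
                std::cout << s << std::endl;      // empty string if a code could not be decoded
    @endcode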
   754      */
   755      CV_WRAP
   756      bool detectAndDecodeMulti(
   757              InputArray img, CV_OUT std::vector<std::string>& decoded_info,
   758              OutputArray points = noArray(),
   759              OutputArrayOfArrays straight_qrcode = noArray()
   760      ) const;
   761  
   762  protected:
   763      struct Impl;
   764      Ptr<Impl> p;
   765  };
   766  
   767  //! @} objdetect
   768  }
   769  
   770  #include "opencv2/objdetect/detection_based_tracker.hpp"
   771  
   772  #endif