github.com/kaydxh/golang@v0.0.131/pkg/gocv/cgo/third_path/opencv4/include/opencv2/calib3d.hpp

     1  /*M///////////////////////////////////////////////////////////////////////////////////////
     2  //
     3  //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
     4  //
     5  //  By downloading, copying, installing or using the software you agree to this license.
     6  //  If you do not agree to this license, do not download, install,
     7  //  copy or use the software.
     8  //
     9  //
    10  //                          License Agreement
    11  //                For Open Source Computer Vision Library
    12  //
    13  // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
    14  // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
    15  // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
    16  // Third party copyrights are property of their respective owners.
    17  //
    18  // Redistribution and use in source and binary forms, with or without modification,
    19  // are permitted provided that the following conditions are met:
    20  //
    21  //   * Redistribution's of source code must retain the above copyright notice,
    22  //     this list of conditions and the following disclaimer.
    23  //
    24  //   * Redistribution's in binary form must reproduce the above copyright notice,
    25  //     this list of conditions and the following disclaimer in the documentation
    26  //     and/or other materials provided with the distribution.
    27  //
    28  //   * The name of the copyright holders may not be used to endorse or promote products
    29  //     derived from this software without specific prior written permission.
    30  //
    31  // This software is provided by the copyright holders and contributors "as is" and
    32  // any express or implied warranties, including, but not limited to, the implied
    33  // warranties of merchantability and fitness for a particular purpose are disclaimed.
    34  // In no event shall the Intel Corporation or contributors be liable for any direct,
    35  // indirect, incidental, special, exemplary, or consequential damages
    36  // (including, but not limited to, procurement of substitute goods or services;
    37  // loss of use, data, or profits; or business interruption) however caused
    38  // and on any theory of liability, whether in contract, strict liability,
    39  // or tort (including negligence or otherwise) arising in any way out of
    40  // the use of this software, even if advised of the possibility of such damage.
    41  //
    42  //M*/
    43  
    44  #ifndef OPENCV_CALIB3D_HPP
    45  #define OPENCV_CALIB3D_HPP
    46  
    47  #include "opencv2/core.hpp"
    48  #include "opencv2/features2d.hpp"
    49  #include "opencv2/core/affine.hpp"
    50  
    51  /**
    52    @defgroup calib3d Camera Calibration and 3D Reconstruction
    53  
    54  The functions in this section use a so-called pinhole camera model. The view of a scene
    55  is obtained by projecting a scene's 3D point \f$P_w\f$ into the image plane using a perspective
    56  transformation which forms the corresponding pixel \f$p\f$. Both \f$P_w\f$ and \f$p\f$ are
    57  represented in homogeneous coordinates, i.e., as 3D and 2D homogeneous vectors, respectively. You will
    58  find a brief introduction to projective geometry, homogeneous vectors and homogeneous
    59  transformations at the end of this section's introduction. For more succinct notation, we often drop
    60  the 'homogeneous' and say vector instead of homogeneous vector.
    61  
    62  The distortion-free projective transformation given by a pinhole camera model is shown below.
    63  
    64  \f[s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w,\f]
    65  
    66  where \f$P_w\f$ is a 3D point expressed with respect to the world coordinate system,
    67  \f$p\f$ is a 2D pixel in the image plane, \f$A\f$ is the camera intrinsic matrix,
    68  \f$R\f$ and \f$t\f$ are the rotation and translation that describe the change of coordinates from
    69  world to camera coordinate systems (or camera frame) and \f$s\f$ is the projective transformation's
    70  arbitrary scaling and not part of the camera model.
    71  
    72  The camera intrinsic matrix \f$A\f$ (notation as used in @cite Zhang2000; also commonly denoted
    73  as \f$K\f$) projects 3D points given in the camera coordinate system to 2D pixel coordinates, i.e.
    74  
    75  \f[p = A P_c.\f]
    76  
    77  The camera intrinsic matrix \f$A\f$ is composed of the focal lengths \f$f_x\f$ and \f$f_y\f$, which are
    78  expressed in pixel units, and the principal point \f$(c_x, c_y)\f$, that is usually close to the
    79  image center:
    80  
    81  \f[A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1},\f]
    82  
    83  and thus
    84  
    85  \f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1} \vecthree{X_c}{Y_c}{Z_c}.\f]
    86  
    87  The matrix of intrinsic parameters does not depend on the scene viewed. So, once estimated, it can
    88  be re-used as long as the focal length is fixed (in the case of a zoom lens, this means a fixed
    89  zoom setting). Note that if an image from the camera is scaled by some factor, all of these
    90  parameters need to be scaled by the same factor.
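
        For example, projecting a point given in camera coordinates amounts to applying \f$A\f$ and
        dividing by \f$Z_c\f$. A minimal sketch with illustrative values (focal lengths and principal
        point are made up):

        @code{.cpp}
            cv::Matx33d A(800,   0, 320,
                            0, 800, 240,
                            0,   0,   1);
            cv::Vec3d Pc(0.1, -0.05, 2.0);   // point in camera coordinates
            cv::Vec3d p = A * Pc;            // homogeneous pixel, scale s = Z_c
            double u = p[0] / p[2];          // f_x * X_c/Z_c + c_x = 360
            double v = p[1] / p[2];          // f_y * Y_c/Z_c + c_y = 220
        @endcode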
    91  
    92  The joint rotation-translation matrix \f$[R|t]\f$ is the matrix product of a projective
    93  transformation and a homogeneous transformation. The 3-by-4 projective transformation maps 3D points
    94  represented in camera coordinates to 2D points in the image plane and represented in normalized
    95  camera coordinates \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$:
    96  
    97  \f[Z_c \begin{bmatrix}
    98  x' \\
    99  y' \\
   100  1
   101  \end{bmatrix} = \begin{bmatrix}
   102  1 & 0 & 0 & 0 \\
   103  0 & 1 & 0 & 0 \\
   104  0 & 0 & 1 & 0
   105  \end{bmatrix}
   106  \begin{bmatrix}
   107  X_c \\
   108  Y_c \\
   109  Z_c \\
   110  1
   111  \end{bmatrix}.\f]
   112  
   113  The homogeneous transformation is encoded by the extrinsic parameters \f$R\f$ and \f$t\f$ and
   114  represents the change of basis from world coordinate system \f$w\f$ to the camera coordinate system
   115  \f$c\f$. Thus, given the representation of the point \f$P\f$ in world coordinates, \f$P_w\f$, we
   116  obtain \f$P\f$'s representation in the camera coordinate system, \f$P_c\f$, by
   117  
   118  \f[P_c = \begin{bmatrix}
   119  R & t \\
   120  0 & 1
   121  \end{bmatrix} P_w.\f]
   122  
   123  This homogeneous transformation is composed of \f$R\f$, a 3-by-3 rotation matrix, and \f$t\f$, a
   124  3-by-1 translation vector:
   125  
   126  \f[\begin{bmatrix}
   127  R & t \\
   128  0 & 1
   129  \end{bmatrix} = \begin{bmatrix}
   130  r_{11} & r_{12} & r_{13} & t_x \\
   131  r_{21} & r_{22} & r_{23} & t_y \\
   132  r_{31} & r_{32} & r_{33} & t_z \\
   133  0 & 0 & 0 & 1
   134  \end{bmatrix},
   135  \f]
   136  
   137  and therefore
   138  
   139  \f[\begin{bmatrix}
   140  X_c \\
   141  Y_c \\
   142  Z_c \\
   143  1
   144  \end{bmatrix} = \begin{bmatrix}
   145  r_{11} & r_{12} & r_{13} & t_x \\
   146  r_{21} & r_{22} & r_{23} & t_y \\
   147  r_{31} & r_{32} & r_{33} & t_z \\
   148  0 & 0 & 0 & 1
   149  \end{bmatrix}
   150  \begin{bmatrix}
   151  X_w \\
   152  Y_w \\
   153  Z_w \\
   154  1
   155  \end{bmatrix}.\f]
   156  
   157  Combining the projective transformation and the homogeneous transformation, we obtain the projective
   158  transformation that maps 3D points in world coordinates into 2D points in the image plane and in
   159  normalized camera coordinates:
   160  
   161  \f[Z_c \begin{bmatrix}
   162  x' \\
   163  y' \\
   164  1
   165  \end{bmatrix} = \begin{bmatrix} R|t \end{bmatrix} \begin{bmatrix}
   166  X_w \\
   167  Y_w \\
   168  Z_w \\
   169  1
   170  \end{bmatrix} = \begin{bmatrix}
   171  r_{11} & r_{12} & r_{13} & t_x \\
   172  r_{21} & r_{22} & r_{23} & t_y \\
   173  r_{31} & r_{32} & r_{33} & t_z
   174  \end{bmatrix}
   175  \begin{bmatrix}
   176  X_w \\
   177  Y_w \\
   178  Z_w \\
   179  1
   180  \end{bmatrix},\f]
   181  
   182  with \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$. Putting the equations for intrinsics and extrinsics together, we can write out
   183  \f$s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w\f$ as
   184  
   185  \f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}
   186  \begin{bmatrix}
   187  r_{11} & r_{12} & r_{13} & t_x \\
   188  r_{21} & r_{22} & r_{23} & t_y \\
   189  r_{31} & r_{32} & r_{33} & t_z
   190  \end{bmatrix}
   191  \begin{bmatrix}
   192  X_w \\
   193  Y_w \\
   194  Z_w \\
   195  1
   196  \end{bmatrix}.\f]
   197  
   198  If \f$Z_c \ne 0\f$, the transformation above is equivalent to the following,
   199  
   200  \f[\begin{bmatrix}
   201  u \\
   202  v
   203  \end{bmatrix} = \begin{bmatrix}
   204  f_x X_c/Z_c + c_x \\
   205  f_y Y_c/Z_c + c_y
   206  \end{bmatrix}\f]
   207  
   208  with
   209  
   210  \f[\vecthree{X_c}{Y_c}{Z_c} = \begin{bmatrix}
   211  R|t
   212  \end{bmatrix} \begin{bmatrix}
   213  X_w \\
   214  Y_w \\
   215  Z_w \\
   216  1
   217  \end{bmatrix}.\f]
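
        Numerically, the same projection can be evaluated with @ref projectPoints (declared later in
        this header); a minimal sketch with illustrative values and zero distortion:

        @code{.cpp}
            std::vector<cv::Point3d> Pw = { {0.1, -0.05, 0.0} };   // world point
            cv::Vec3d rvec(0, 0, 0);                               // R = I
            cv::Vec3d tvec(0, 0, 2.0);                             // Z_c = 2
            cv::Matx33d A(800, 0, 320, 0, 800, 240, 0, 0, 1);
            std::vector<cv::Point2d> p;
            cv::projectPoints(Pw, rvec, tvec, A, cv::noArray(), p);
            // p[0] = (360, 220), matching the equations above
        @endcode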
   218  
   219  The following figure illustrates the pinhole camera model.
   220  
   221  ![Pinhole camera model](pics/pinhole_camera_model.png)
   222  
   223  Real lenses usually have some distortion, mostly radial distortion, and slight tangential distortion.
   224  So, the above model is extended as:
   225  
   226  \f[\begin{bmatrix}
   227  u \\
   228  v
   229  \end{bmatrix} = \begin{bmatrix}
   230  f_x x'' + c_x \\
   231  f_y y'' + c_y
   232  \end{bmatrix}\f]
   233  
   234  where
   235  
   236  \f[\begin{bmatrix}
   237  x'' \\
   238  y''
   239  \end{bmatrix} = \begin{bmatrix}
   240  x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\
   241  y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
   242  \end{bmatrix}\f]
   243  
   244  with
   245  
   246  \f[r^2 = x'^2 + y'^2\f]
   247  
   248  and
   249  
   250  \f[\begin{bmatrix}
   251  x'\\
   252  y'
   253  \end{bmatrix} = \begin{bmatrix}
   254  X_c/Z_c \\
   255  Y_c/Z_c
   256  \end{bmatrix},\f]
   257  
   258  if \f$Z_c \ne 0\f$.
   259  
   260  The distortion parameters are the radial coefficients \f$k_1\f$, \f$k_2\f$, \f$k_3\f$, \f$k_4\f$, \f$k_5\f$, and \f$k_6\f$;
   261  the tangential distortion coefficients \f$p_1\f$ and \f$p_2\f$; and the thin prism distortion
   262  coefficients \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$. Higher-order coefficients are not considered in OpenCV.
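
        For the common five-coefficient case \f$(k_1, k_2, p_1, p_2, k_3)\f$ the model above can be
        evaluated directly; a sketch with illustrative coefficients (higher-order and thin prism
        terms assumed zero):

        @code{.cpp}
            double xp = 0.2, yp = -0.1;              // x', y' (normalized coordinates)
            double k1 = -0.28, k2 = 0.07, p1 = 1e-3, p2 = -2e-4;
            double r2 = xp*xp + yp*yp;
            double radial = 1 + k1*r2 + k2*r2*r2;    // k_3..k_6, s_1..s_4 set to zero
            double xpp = xp*radial + 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
            double ypp = yp*radial + p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
            // then u = f_x*xpp + c_x and v = f_y*ypp + c_y as above
        @endcode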
   263  
   264  The next figures show two common types of radial distortion: barrel distortion
   265  (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically decreasing)
   266  and pincushion distortion (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically increasing).
   267  Radial distortion is always monotonic for real lenses,
   268  and if the estimator produces a non-monotonic result,
   269  this should be considered a calibration failure.
   270  More generally, radial distortion must be monotonic and the distortion function must be bijective.
   271  A failed estimation result may look deceptively good near the image center
   272  but will work poorly in e.g. AR/SFM applications.
   273  The optimization method used in OpenCV camera calibration does not include these constraints as
   274  the framework does not support the required integer programming and polynomial inequalities.
   275  See [issue #15992](https://github.com/opencv/opencv/issues/15992) for additional information.
   276  
   277  ![](pics/distortion_examples.png)
   278  ![](pics/distortion_examples2.png)
   279  
   280  In some cases, the image sensor may be tilted in order to focus an oblique plane in front of the
   281  camera (Scheimpflug principle). This can be useful for particle image velocimetry (PIV) or
   282  triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and
   283  \f$y''\f$. This distortion can be modeled in the following way, see e.g. @cite Louhichi07.
   284  
   285  \f[\begin{bmatrix}
   286  u \\
   287  v
   288  \end{bmatrix} = \begin{bmatrix}
   289  f_x x''' + c_x \\
   290  f_y y''' + c_y
   291  \end{bmatrix},\f]
   292  
   293  where
   294  
   295  \f[s\vecthree{x'''}{y'''}{1} =
   296  \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
   297  {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
   298  {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\f]
   299  
   300  and the matrix \f$R(\tau_x, \tau_y)\f$ is defined by two rotations with angular parameters
   301  \f$\tau_x\f$ and \f$\tau_y\f$, respectively,
   302  
   303  \f[
   304  R(\tau_x, \tau_y) =
   305  \vecthreethree{\cos(\tau_y)}{0}{-\sin(\tau_y)}{0}{1}{0}{\sin(\tau_y)}{0}{\cos(\tau_y)}
   306  \vecthreethree{1}{0}{0}{0}{\cos(\tau_x)}{\sin(\tau_x)}{0}{-\sin(\tau_x)}{\cos(\tau_x)} =
   307  \vecthreethree{\cos(\tau_y)}{\sin(\tau_y)\sin(\tau_x)}{-\sin(\tau_y)\cos(\tau_x)}
   308  {0}{\cos(\tau_x)}{\sin(\tau_x)}
   309  {\sin(\tau_y)}{-\cos(\tau_y)\sin(\tau_x)}{\cos(\tau_y)\cos(\tau_x)}.
   310  \f]
   311  
   312  In the functions below, the coefficients are passed or returned as the
   313  
   314  \f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f]
   315  
   316  vector. That is, if the vector contains four elements, it means that \f$k_3=0\f$. The distortion
   317  coefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera
   318  parameters, and they remain the same regardless of the captured image resolution. If, for example, a
   319  camera has been calibrated on images of 320 x 240 resolution, exactly the same distortion
   320  coefficients can be used for 640 x 480 images from the same camera while \f$f_x\f$, \f$f_y\f$,
   321  \f$c_x\f$, and \f$c_y\f$ need to be scaled appropriately.
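
        For instance, a sketch (illustrative numbers) of reusing a calibration at twice the original
        resolution:

        @code{.cpp}
            // Calibration at 320 x 240 reused for 640 x 480: scale A, keep distCoeffs.
            cv::Mat A = (cv::Mat_<double>(3, 3) << 250, 0, 160,
                                                     0, 250, 120,
                                                     0,   0,   1);
            cv::Mat distCoeffs = (cv::Mat_<double>(1, 5) << -0.28, 0.07, 1e-3, -2e-4, 0);
            double s = 640.0 / 320.0;                  // resolution scale factor
            A(cv::Range(0, 2), cv::Range(0, 3)) *= s;  // scales f_x, f_y, c_x, c_y
            // distCoeffs is reused unchanged at the new resolution
        @endcode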
   322  
   323  The functions below use the above model to do the following:
   324  
   325  -   Project 3D points to the image plane given intrinsic and extrinsic parameters.
   326  -   Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their
   327  projections.
   328  -   Estimate intrinsic and extrinsic camera parameters from several views of a known calibration
   329  pattern (every view is described by several 3D-2D point correspondences).
   330  -   Estimate the relative position and orientation of the stereo camera "heads" and compute the
   331  *rectification* transformation that makes the camera optical axes parallel.
   332  
   333  <B> Homogeneous Coordinates </B><br>
   334  Homogeneous coordinates are a system of coordinates used in projective geometry. They make it
   335  possible to represent points at infinity with finite coordinates and simplify formulas compared
   336  to their Cartesian counterparts, e.g., they have the advantage that affine transformations can be
   337  expressed as linear homogeneous transformations.
   338  
   339  One obtains the homogeneous vector \f$P_h\f$ by appending a 1 to an n-dimensional Cartesian
   340  vector \f$P\f$, e.g., for a 3D Cartesian vector the mapping \f$P \rightarrow P_h\f$ is:
   341  
   342  \f[\begin{bmatrix}
   343  X \\
   344  Y \\
   345  Z
   346  \end{bmatrix} \rightarrow \begin{bmatrix}
   347  X \\
   348  Y \\
   349  Z \\
   350  1
   351  \end{bmatrix}.\f]
   352  
   353  For the inverse mapping \f$P_h \rightarrow P\f$, one divides all elements of the homogeneous vector
   354  by its last element, e.g. for a 3D homogeneous vector one gets its 2D Cartesian counterpart by:
   355  
   356  \f[\begin{bmatrix}
   357  X \\
   358  Y \\
   359  W
   360  \end{bmatrix} \rightarrow \begin{bmatrix}
   361  X / W \\
   362  Y / W
   363  \end{bmatrix},\f]
   364  
   365  if \f$W \ne 0\f$.
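
        Both mappings are available as #convertPointsToHomogeneous and #convertPointsFromHomogeneous
        (declared later in this header); a small sketch:

        @code{.cpp}
            std::vector<cv::Point3d> ph = { {4.0, 2.0, 2.0} };   // (X, Y, W)
            std::vector<cv::Point2d> p;
            cv::convertPointsFromHomogeneous(ph, p);             // -> (X/W, Y/W) = (2, 1)
            std::vector<cv::Point3d> ph2;
            cv::convertPointsToHomogeneous(p, ph2);              // -> (2, 1, 1)
        @endcode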
   366  
   367  Due to this mapping, all multiples \f$k P_h\f$, for \f$k \ne 0\f$, of a homogeneous point represent
   368  the same point \f$P_h\f$. An intuitive understanding of this property is that under a projective
   369  transformation, all multiples of \f$P_h\f$ are mapped to the same point. This is the physical
   370  observation one makes for pinhole cameras, as all points along a ray through the camera's pinhole are
   371  projected to the same image point, e.g., all points along the red ray in the image of the pinhole
   372  camera model above would be mapped to the same image coordinate. This property is also the source
   373  of the scale ambiguity \f$s\f$ in the equation of the pinhole camera model.
   374  
   375  As mentioned, by using homogeneous coordinates we can express any change of basis parameterized by
   376  \f$R\f$ and \f$t\f$ as a linear transformation, e.g., the change of basis from coordinate system
   377  0 to coordinate system 1 becomes:
   378  
   379  \f[P_1 = R P_0 + t \rightarrow P_{h_1} = \begin{bmatrix}
   380  R & t \\
   381  0 & 1
   382  \end{bmatrix} P_{h_0}.\f]
   383  
   384  @note
   385      -   Many functions in this module take a camera intrinsic matrix as an input parameter. Although all
   386          functions assume the same structure of this parameter, they may name it differently. The
   387          parameter's description, however, will be clear in that a camera intrinsic matrix with the structure
   388          shown above is required.
   389      -   A calibration sample for 3 cameras in a horizontal position can be found at
   390          opencv_source_code/samples/cpp/3calibration.cpp
   391      -   A calibration sample based on a sequence of images can be found at
   392          opencv_source_code/samples/cpp/calibration.cpp
   393      -   A calibration sample in order to do 3D reconstruction can be found at
   394          opencv_source_code/samples/cpp/build3dmodel.cpp
   395      -   A calibration example on stereo calibration can be found at
   396          opencv_source_code/samples/cpp/stereo_calib.cpp
   397      -   A calibration example on stereo matching can be found at
   398          opencv_source_code/samples/cpp/stereo_match.cpp
   399      -   (Python) A camera calibration sample can be found at
   400          opencv_source_code/samples/python/calibrate.py
   401  
   402    @{
   403      @defgroup calib3d_fisheye Fisheye camera model
   404  
   405      Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the
   406      matrix X). The coordinate vector of P in the camera reference frame is:
   407  
   408      \f[Xc = R X + T\f]
   409  
   410      where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); call x, y
   411      and z the 3 coordinates of Xc:
   412  
   413      \f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f]
   414  
   415      The pinhole projection coordinates of P are [a; b] where
   416  
   417      \f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = \arctan(r)\f]
   418  
   419      Fisheye distortion:
   420  
   421      \f[\theta_d = \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8)\f]
   422  
   423      The distorted point coordinates are [x'; y'] where
   424  
   425      \f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f]
   426  
   427      Finally, conversion into pixel coordinates: the final pixel coordinate vector [u; v] is given by:
   428  
   429      \f[u = f_x (x' + \alpha y') + c_x \\
   430      v = f_y y' + c_y\f]
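
            A minimal sketch of this model using cv::fisheye::projectPoints (declared in the
            cv::fisheye namespace later in this header), with illustrative intrinsics:

            @code{.cpp}
                std::vector<cv::Point3d> obj = { {0.1, 0.0, 1.0} };
                cv::Matx33d K(400, 0, 320, 0, 400, 240, 0, 0, 1);
                cv::Vec4d D(0.05, -0.01, 0.0, 0.0);   // k_1..k_4
                std::vector<cv::Point2d> img;
                cv::fisheye::projectPoints(obj, img, cv::Vec3d::all(0), cv::Vec3d::all(0), K, D);
            @endcode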
   431  
   432      @defgroup calib3d_c C API
   433  
   434    @}
   435   */
   436  
   437  namespace cv
   438  {
   439  
   440  //! @addtogroup calib3d
   441  //! @{
   442  
   443  //! type of the robust estimation algorithm
   444  enum { LMEDS  = 4,  //!< least-median of squares algorithm
   445         RANSAC = 8,  //!< RANSAC algorithm
   446         RHO    = 16, //!< RHO algorithm
   447         USAC_DEFAULT  = 32, //!< USAC algorithm, default settings
   448         USAC_PARALLEL = 33, //!< USAC, parallel version
   449         USAC_FM_8PTS = 34,  //!< USAC, fundamental matrix 8 points
   450         USAC_FAST = 35,     //!< USAC, fast settings
   451         USAC_ACCURATE = 36, //!< USAC, accurate settings
   452         USAC_PROSAC = 37,   //!< USAC, sorted points, runs PROSAC
   453         USAC_MAGSAC = 38    //!< USAC, runs MAGSAC++
   454       };
   455  
   456  enum SolvePnPMethod {
   457      SOLVEPNP_ITERATIVE   = 0,
   458      SOLVEPNP_EPNP        = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
   459      SOLVEPNP_P3P         = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
   460      SOLVEPNP_DLS         = 3, //!< **Broken implementation. Using this flag will fall back to EPnP.** \n
   461                                //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
   462      SOLVEPNP_UPNP        = 4, //!< **Broken implementation. Using this flag will fall back to EPnP.** \n
   463                                //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
   464      SOLVEPNP_AP3P        = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17
   465      SOLVEPNP_IPPE        = 6, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
   466                                //!< Object points must be coplanar.
   467      SOLVEPNP_IPPE_SQUARE = 7, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
   468                                //!< This is a special case suitable for marker pose estimation.\n
   469                                //!< 4 coplanar object points must be defined in the following order:
   470                                //!<   - point 0: [-squareLength / 2,  squareLength / 2, 0]
   471                                //!<   - point 1: [ squareLength / 2,  squareLength / 2, 0]
   472                                //!<   - point 2: [ squareLength / 2, -squareLength / 2, 0]
   473                                //!<   - point 3: [-squareLength / 2, -squareLength / 2, 0]
   474      SOLVEPNP_SQPNP       = 8, //!< SQPnP: A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem @cite Terzakis20
   475  #ifndef CV_DOXYGEN
   476      SOLVEPNP_MAX_COUNT        //!< Used for count
   477  #endif
   478  };
   479  
   480  enum { CALIB_CB_ADAPTIVE_THRESH = 1,
   481         CALIB_CB_NORMALIZE_IMAGE = 2,
   482         CALIB_CB_FILTER_QUADS    = 4,
   483         CALIB_CB_FAST_CHECK      = 8,
   484         CALIB_CB_EXHAUSTIVE      = 16,
   485         CALIB_CB_ACCURACY        = 32,
   486         CALIB_CB_LARGER          = 64,
   487         CALIB_CB_MARKER          = 128
   488       };
   489  
   490  enum { CALIB_CB_SYMMETRIC_GRID  = 1,
   491         CALIB_CB_ASYMMETRIC_GRID = 2,
   492         CALIB_CB_CLUSTERING      = 4
   493       };
   494  
   495  enum { CALIB_NINTRINSIC          = 18,
   496         CALIB_USE_INTRINSIC_GUESS = 0x00001,
   497         CALIB_FIX_ASPECT_RATIO    = 0x00002,
   498         CALIB_FIX_PRINCIPAL_POINT = 0x00004,
   499         CALIB_ZERO_TANGENT_DIST   = 0x00008,
   500         CALIB_FIX_FOCAL_LENGTH    = 0x00010,
   501         CALIB_FIX_K1              = 0x00020,
   502         CALIB_FIX_K2              = 0x00040,
   503         CALIB_FIX_K3              = 0x00080,
   504         CALIB_FIX_K4              = 0x00800,
   505         CALIB_FIX_K5              = 0x01000,
   506         CALIB_FIX_K6              = 0x02000,
   507         CALIB_RATIONAL_MODEL      = 0x04000,
   508         CALIB_THIN_PRISM_MODEL    = 0x08000,
   509         CALIB_FIX_S1_S2_S3_S4     = 0x10000,
   510         CALIB_TILTED_MODEL        = 0x40000,
   511         CALIB_FIX_TAUX_TAUY       = 0x80000,
   512         CALIB_USE_QR              = 0x100000, //!< use QR instead of SVD decomposition for solving. Faster but potentially less precise
   513         CALIB_FIX_TANGENT_DIST    = 0x200000,
   514         // only for stereo
   515         CALIB_FIX_INTRINSIC       = 0x00100,
   516         CALIB_SAME_FOCAL_LENGTH   = 0x00200,
   517         // for stereo rectification
   518         CALIB_ZERO_DISPARITY      = 0x00400,
   519         CALIB_USE_LU              = (1 << 17), //!< use LU instead of SVD decomposition for solving. much faster but potentially less precise
   520         CALIB_USE_EXTRINSIC_GUESS = (1 << 22)  //!< for stereoCalibrate
   521       };
   522  
   523  //! the algorithm for finding fundamental matrix
   524  enum { FM_7POINT = 1, //!< 7-point algorithm
   525         FM_8POINT = 2, //!< 8-point algorithm
   526         FM_LMEDS  = 4, //!< least-median algorithm. 7-point algorithm is used.
   527         FM_RANSAC = 8  //!< RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used.
   528       };
   529  
   530  enum HandEyeCalibrationMethod
   531  {
   532      CALIB_HAND_EYE_TSAI         = 0, //!< A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration @cite Tsai89
   533      CALIB_HAND_EYE_PARK         = 1, //!< Robot Sensor Calibration: Solving AX = XB on the Euclidean Group @cite Park94
   534      CALIB_HAND_EYE_HORAUD       = 2, //!< Hand-eye Calibration @cite Horaud95
   535      CALIB_HAND_EYE_ANDREFF      = 3, //!< On-line Hand-Eye Calibration @cite Andreff99
   536      CALIB_HAND_EYE_DANIILIDIS   = 4  //!< Hand-Eye Calibration Using Dual Quaternions @cite Daniilidis98
   537  };
   538  
   539  enum RobotWorldHandEyeCalibrationMethod
   540  {
   541      CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0, //!< Solving the robot-world/hand-eye calibration problem using the kronecker product @cite Shah2013SolvingTR
   542      CALIB_ROBOT_WORLD_HAND_EYE_LI   = 1  //!< Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product @cite Li2010SimultaneousRA
   543  };
   544  
   545  enum SamplingMethod { SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC,
   546          SAMPLING_PROSAC };
   547  enum LocalOptimMethod {LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO,
   548          LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA};
   549  enum ScoreMethod {SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS};
   550  enum NeighborSearchMethod { NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS };
   551  
   552  struct CV_EXPORTS_W_SIMPLE UsacParams
   553  { // in alphabetical order
   554      CV_WRAP UsacParams();
   555      CV_PROP_RW double confidence;
   556      CV_PROP_RW bool isParallel;
   557      CV_PROP_RW int loIterations;
   558      CV_PROP_RW LocalOptimMethod loMethod;
   559      CV_PROP_RW int loSampleSize;
   560      CV_PROP_RW int maxIterations;
   561      CV_PROP_RW NeighborSearchMethod neighborsSearch;
   562      CV_PROP_RW int randomGeneratorState;
   563      CV_PROP_RW SamplingMethod sampler;
   564      CV_PROP_RW ScoreMethod score;
   565      CV_PROP_RW double threshold;
   566  };
   567  
   568  /** @brief Converts a rotation matrix to a rotation vector or vice versa.
   569  
   570  @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).
   571  @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.
   572  @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
   573  derivatives of the output array components with respect to the input array components.
   574  
   575  \f[\begin{array}{l} \theta \leftarrow norm(r) \\ r  \leftarrow r/ \theta \\ R =  \cos(\theta) I + (1- \cos{\theta} ) r r^T +  \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f]
   576  
   577  Inverse transformation can be also done easily, since
   578  
   579  \f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f]
   580  
   581  A rotation vector is a convenient and the most compact representation of a rotation matrix (since any
   582  rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry
   583  optimization procedures like @ref calibrateCamera, @ref stereoCalibrate, or @ref solvePnP .
   584  
   585  @note More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate
   586  can be found in:
   587      - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi @cite Gallego2014ACF
   588  
   589  @note Useful information on SE(3) and Lie Groups can be found in:
   590      - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco @cite blanco2010tutorial
   591      - Lie Groups for 2D and 3D Transformation, Ethan Eade @cite Eade17
   592      - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan @cite Sol2018AML
   593   */
   594  CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() );
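
        /* A minimal usage sketch (illustrative values): converting a rotation vector to a rotation
           matrix and back with #Rodrigues.
        @code{.cpp}
            cv::Vec3d rvec(0.1, 0.2, 0.3);   // axis * angle, theta = norm(rvec)
            cv::Matx33d R;
            cv::Rodrigues(rvec, R);          // 3x1 -> 3x3
            cv::Vec3d rvecBack;
            cv::Rodrigues(R, rvecBack);      // 3x3 -> 3x1, recovers rvec
        @endcode
        */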
   595  
   596  
   597  
   598  /** Levenberg-Marquardt solver. Starting from the specified vector of parameters, it
   599      optimizes the target vector of criteria "err"
   600      (finds a local minimum of each target vector component's absolute value).
   601  
   602      When needed, it calls the user-provided callback.
   603  */
   604  class CV_EXPORTS LMSolver : public Algorithm
   605  {
   606  public:
   607      class CV_EXPORTS Callback
   608      {
   609      public:
   610          virtual ~Callback() {}
   611          /**
   612           computes error and Jacobian for the specified vector of parameters
   613  
   614           @param param the current vector of parameters
   615           @param err output vector of errors: err_i = actual_f_i - ideal_f_i
   616           @param J output Jacobian: J_ij = d(err_i)/d(param_j)
   617  
   618           when J=noArray(), it means that it does not need to be computed.
   619           Dimensionality of error vector and param vector can be different.
   620           The callback should explicitly allocate (with "create" method) each output array
   621           (unless it's noArray()).
   622          */
   623          virtual bool compute(InputArray param, OutputArray err, OutputArray J) const = 0;
   624      };
   625  
   626      /**
   627         Runs the Levenberg-Marquardt algorithm using the passed vector of parameters as the start point.
   628         The final vector of parameters (whether the algorithm converged or not) is stored in the same
   629         vector. The method returns the number of iterations used. If it is equal to the previously specified
   630         maxIters, there is a big chance the algorithm did not converge.
   631  
   632         @param param initial/final vector of parameters.
   633  
   634         Note that the dimensionality of parameter space is defined by the size of param vector,
   635         and the dimensionality of optimized criteria is defined by the size of err vector
   636         computed by the callback.
   637      */
   638      virtual int run(InputOutputArray param) const = 0;
   639  
   640      /**
   641         Sets the maximum number of iterations
   642         @param maxIters the number of iterations
   643      */
   644      virtual void setMaxIters(int maxIters) = 0;
   645      /**
   646         Retrieves the current maximum number of iterations
   647      */
   648      virtual int getMaxIters() const = 0;
   649  
   650      /**
   651         Creates a Levenberg-Marquardt solver
   652  
   653         @param cb callback
   654         @param maxIters maximum number of iterations that can be further
   655           modified using setMaxIters() method.
   656      */
   657      static Ptr<LMSolver> create(const Ptr<LMSolver::Callback>& cb, int maxIters);
   658      static Ptr<LMSolver> create(const Ptr<LMSolver::Callback>& cb, int maxIters, double eps);
   659  };
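
        /* A hedged usage sketch: minimizing err(x) = x^2 - 2 with LMSolver. It assumes, as in the
           calib3d implementation, that param and err are CV_64F column vectors; the callback class
           name and values are illustrative.
        @code{.cpp}
            class SqrtTwoCallback : public cv::LMSolver::Callback
            {
            public:
                bool compute(cv::InputArray param, cv::OutputArray err, cv::OutputArray J) const CV_OVERRIDE
                {
                    double x = param.getMat().at<double>(0);
                    err.create(1, 1, CV_64F);
                    err.getMat().at<double>(0) = x * x - 2.0;   // residual
                    if (J.needed())
                    {
                        J.create(1, 1, CV_64F);
                        J.getMat().at<double>(0) = 2.0 * x;     // d(err)/d(param)
                    }
                    return true;
                }
            };

            cv::Mat param = (cv::Mat_<double>(1, 1) << 1.0);    // start at x = 1
            cv::LMSolver::create(cv::makePtr<SqrtTwoCallback>(), 100)->run(param);
            // param now holds ~1.41421
        @endcode
        */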
   660  
   661  
   662  
   663  /** @example samples/cpp/tutorial_code/features2D/Homography/pose_from_homography.cpp
   664  An example program about pose estimation from coplanar points
   665  
   666  Check @ref tutorial_homography "the corresponding tutorial" for more details
   667  */
   668  
   669  /** @brief Finds a perspective transformation between two planes.
   670  
   671  @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2
   672  or vector\<Point2f\> .
   673  @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
   674  a vector\<Point2f\> .
   675  @param method Method used to compute a homography matrix. The following methods are possible:
   676  -   **0** - a regular method using all the points, i.e., the least squares method
   677  -   @ref RANSAC - RANSAC-based robust method
   678  -   @ref LMEDS - Least-Median robust method
   679  -   @ref RHO - PROSAC-based robust method
   680  @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier
   681  (used in the RANSAC and RHO methods only). That is, if
   682  \f[\| \texttt{dstPoints} _i -  \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2  >  \texttt{ransacReprojThreshold}\f]
   683  then the point \f$i\f$ is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
   684  it usually makes sense to set this parameter somewhere in the range of 1 to 10.
   685  @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input
   686  mask values are ignored.
   687  @param maxIters The maximum number of RANSAC iterations.
   688  @param confidence Confidence level, between 0 and 1.
   689  
   690  The function finds and returns the perspective transformation \f$H\f$ between the source and the
   691  destination planes:
   692  
   693  \f[s_i  \vecthree{x'_i}{y'_i}{1} \sim H  \vecthree{x_i}{y_i}{1}\f]
   694  
   695  so that the back-projection error
   696  
   697  \f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f]
   698  
   699  is minimized. If the parameter method is set to the default value 0, the function uses all the point
   700  pairs to compute an initial homography estimate with a simple least-squares scheme.
   701  
   702  However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective
   703  transformation (that is, there are some outliers), this initial estimate will be poor. In this case,
   704  you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different
   705  random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix
   706  using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the
   707  computed homography (which is the number of inliers for RANSAC or the least median re-projection error for
   708  LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and
   709  the mask of inliers/outliers.
   710  
   711  Regardless of the method, robust or not, the computed homography matrix is refined further (using
   712  inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the
   713  re-projection error even more.
   714  
   715  The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to
   716  distinguish inliers from outliers. The method LMeDS does not need any threshold but it works
   717  correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the
   718  noise is rather small, use the default method (method=0).
   719  
   720  The function is used to find initial intrinsic and extrinsic matrices. The homography matrix is
   721  determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an \f$H\f$ matrix
   722  cannot be estimated, an empty one will be returned.
   723  
   724  @sa
   725  getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective,
   726  perspectiveTransform
   727   */
   728  CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints,
   729                                   int method = 0, double ransacReprojThreshold = 3,
   730                                   OutputArray mask=noArray(), const int maxIters = 2000,
   731                                   const double confidence = 0.995);
   732  
   733  /** @overload */
   734  CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints,
   735                                 OutputArray mask, int method = 0, double ransacReprojThreshold = 3 );
   736  
   737  
   738  CV_EXPORTS_W Mat findHomography(InputArray srcPoints, InputArray dstPoints, OutputArray mask,
   739                     const UsacParams &params);
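
        /* A minimal usage sketch: correspondences related by (x, y) -> (2x + 2, 2y + 1), estimated
           with the RANSAC method (point values are illustrative).
        @code{.cpp}
            std::vector<cv::Point2f> src = { {0, 0}, {1, 0}, {1, 1}, {0, 1}, {0.5f, 0.5f} };
            std::vector<cv::Point2f> dst = { {2, 1}, {4, 1}, {4, 3}, {2, 3}, {3, 2} };
            cv::Mat inlierMask;
            cv::Mat H = cv::findHomography(src, dst, cv::RANSAC, 3.0, inlierMask);
            // on success, H is 3x3 CV_64F, normalized so that H.at<double>(2,2) == 1
        @endcode
        */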
   740  
   741  /** @brief Computes an RQ decomposition of 3x3 matrices.
   742  
   743  @param src 3x3 input matrix.
   744  @param mtxR Output 3x3 upper-triangular matrix.
   745  @param mtxQ Output 3x3 orthogonal matrix.
   746  @param Qx Optional output 3x3 rotation matrix around x-axis.
   747  @param Qy Optional output 3x3 rotation matrix around y-axis.
   748  @param Qz Optional output 3x3 rotation matrix around z-axis.
   749  
   750  The function computes an RQ decomposition using the given rotations. This function is used in
   751  #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera
   752  and a rotation matrix.
   753  
   754  It optionally returns three rotation matrices, one for each axis, and the three Euler angles in
   755  degrees (as the return value) that could be used in OpenGL. Note, there is always more than one
   756  sequence of rotations about the three principal axes that results in the same orientation of an
   757  object, e.g. see @cite Slabaugh . The returned three rotation matrices and corresponding three Euler
   758  angles are only one of the possible solutions.
   759   */
   760  CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ,
   761                                  OutputArray Qx = noArray(),
   762                                  OutputArray Qy = noArray(),
   763                                  OutputArray Qz = noArray());
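
        /* A minimal usage sketch (illustrative rotation): the return value carries the three Euler
           angles in degrees.
        @code{.cpp}
            cv::Matx33d R;
            cv::Rodrigues(cv::Vec3d(0.1, 0.2, 0.3), R);   // some rotation matrix
            cv::Mat mtxR, mtxQ;
            cv::Vec3d eulerDeg = cv::RQDecomp3x3(R, mtxR, mtxQ);
        @endcode
        */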
   764  
   765  /** @brief Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix.
   766  
   767  @param projMatrix 3x4 input projection matrix P.
   768  @param cameraMatrix Output 3x3 camera intrinsic matrix \f$\cameramatrix{A}\f$.
   769  @param rotMatrix Output 3x3 external rotation matrix R.
   770  @param transVect Output 4x1 translation vector T.
   771  @param rotMatrixX Optional 3x3 rotation matrix around x-axis.
   772  @param rotMatrixY Optional 3x3 rotation matrix around y-axis.
   773  @param rotMatrixZ Optional 3x3 rotation matrix around z-axis.
   774  @param eulerAngles Optional three-element vector containing three Euler angles of rotation in
   775  degrees.
   776  
   777  The function computes a decomposition of a projection matrix into a calibration and a rotation
   778  matrix and the position of a camera.
   779  
   780  It optionally returns three rotation matrices, one for each axis, and three Euler angles that could
   781  be used in OpenGL. Note, there is always more than one sequence of rotations about the three
   782  principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh . The
   783  returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
   784  
   785  The function is based on #RQDecomp3x3.
   786   */
   787  CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix,
   788                                               OutputArray rotMatrix, OutputArray transVect,
   789                                               OutputArray rotMatrixX = noArray(),
   790                                               OutputArray rotMatrixY = noArray(),
   791                                               OutputArray rotMatrixZ = noArray(),
   792                                               OutputArray eulerAngles = noArray() );
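
        /* A minimal usage sketch: assembling P = A [R|t] from illustrative values and taking it
           apart again. Note that transVect is the homogeneous 4x1 camera position, not t itself.
        @code{.cpp}
            cv::Matx33d A(800, 0, 320, 0, 800, 240, 0, 0, 1);
            cv::Matx33d R;
            cv::Rodrigues(cv::Vec3d(0, 0.1, 0), R);
            cv::Matx34d Rt(R(0,0), R(0,1), R(0,2), 0.1,
                           R(1,0), R(1,1), R(1,2), 0.0,
                           R(2,0), R(2,1), R(2,2), 2.0);
            cv::Mat camMatrix, rotMatrix, transVect;
            cv::decomposeProjectionMatrix(cv::Mat(A * Rt), camMatrix, rotMatrix, transVect);
        @endcode
        */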
   793  
   794  /** @brief Computes partial derivatives of the matrix product for each multiplied matrix.
   795  
   796  @param A First multiplied matrix.
   797  @param B Second multiplied matrix.
   798  @param dABdA First output derivative matrix d(A\*B)/dA of size
   799  \f$\texttt{A.rows*B.cols} \times {A.rows*A.cols}\f$ .
   800  @param dABdB Second output derivative matrix d(A\*B)/dB of size
   801  \f$\texttt{A.rows*B.cols} \times {B.rows*B.cols}\f$ .
   802  
   803  The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to
   804  the elements of each of the two input matrices. The function is used to compute the Jacobian
   805  matrices in #stereoCalibrate but can also be used in any other similar optimization function.
   806   */
   807  CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB );
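
        /* A minimal usage sketch checking the output sizes described above:
        @code{.cpp}
            cv::Mat A = cv::Mat::eye(3, 3, CV_64F);
            cv::Mat B = cv::Mat::ones(3, 1, CV_64F);
            cv::Mat dABdA, dABdB;
            cv::matMulDeriv(A, B, dABdA, dABdB);   // dABdA is 3x9, dABdB is 3x3
        @endcode
        */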
   808  
   809  /** @brief Combines two rotation-and-shift transformations.
   810  
   811  @param rvec1 First rotation vector.
   812  @param tvec1 First translation vector.
   813  @param rvec2 Second rotation vector.
   814  @param tvec2 Second translation vector.
   815  @param rvec3 Output rotation vector of the superposition.
   816  @param tvec3 Output translation vector of the superposition.
   817  @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1
   818  @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1
   819  @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2
   820  @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2
   821  @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1
   822  @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1
   823  @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2
   824  @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2
   825  
   826  The function computes:
   827  
   828  \f[\begin{array}{l} \texttt{rvec3} =  \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right )  \\ \texttt{tvec3} =  \mathrm{rodrigues} ( \texttt{rvec2} )  \cdot \texttt{tvec1} +  \texttt{tvec2} \end{array} ,\f]
   829  
   830  where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and
   831  \f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See Rodrigues for details.
   832  
   833  Also, the function can compute the derivatives of the output vectors with regard to the input
   834  vectors (see #matMulDeriv). The function is used inside #stereoCalibrate but can also be used in
   835  your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a
   836  function that contains a matrix multiplication.
   837   */
   838  CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1,
   839                               InputArray rvec2, InputArray tvec2,
   840                               OutputArray rvec3, OutputArray tvec3,
   841                               OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(),
   842                               OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(),
   843                               OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(),
   844                               OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() );
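
        /* A minimal usage sketch (illustrative values): chaining a rotation about z with a pure
           translation along x.
        @code{.cpp}
            cv::Vec3d rvec1(0, 0, CV_PI / 2), tvec1(0, 0, 0);   // first transform
            cv::Vec3d rvec2(0, 0, 0),         tvec2(1, 0, 0);   // second transform
            cv::Vec3d rvec3, tvec3;
            cv::composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
            // rvec3 ~ (0, 0, CV_PI/2), tvec3 = R2 * tvec1 + tvec2 = (1, 0, 0)
        @endcode
        */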
   845  
   846  /** @brief Projects 3D points to an image plane.
   847  
   848  @param objectPoints Array of object points expressed with respect to the world coordinate frame. A 3xN/Nx3
   849  1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view.
   850  @param rvec The rotation vector (@ref Rodrigues) that, together with tvec, performs a change of
   851  basis from world to camera coordinate system, see @ref calibrateCamera for details.
   852  @param tvec The translation vector, see parameter description above.
   853  @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
   854  @param distCoeffs Input vector of distortion coefficients
   855  \f$\distcoeffs\f$ . If the vector is empty, the zero distortion coefficients are assumed.
   856  @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
   857  vector\<Point2f\> .
   858  @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
   859  points with respect to components of the rotation vector, translation vector, focal lengths,
   860  coordinates of the principal point and the distortion coefficients. In the old interface different
   861  components of the jacobian are returned via different output parameters.
   862  @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
   863  function assumes that the aspect ratio (\f$f_x / f_y\f$) is fixed and correspondingly adjusts the
   864  jacobian matrix.
   865  
   866  The function computes the 2D projections of 3D points to the image plane, given intrinsic and
   867  extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial
   868  derivatives of image point coordinates (as functions of all the input parameters) with respect to
   869  the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global
   870  optimization in @ref calibrateCamera, @ref solvePnP, and @ref stereoCalibrate. The function itself
   871  can also be used to compute a re-projection error, given the current intrinsic and extrinsic
   872  parameters.
   873  
   874  @note By setting rvec = tvec = \f$[0, 0, 0]\f$, or by setting cameraMatrix to a 3x3 identity matrix,
   875  or by passing zero distortion coefficients, one can get various useful partial cases of the
   876  function. This means, one can compute the distorted coordinates for a sparse set of points or apply
   877  a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
   878   */
   879  CV_EXPORTS_W void projectPoints( InputArray objectPoints,
   880                                   InputArray rvec, InputArray tvec,
   881                                   InputArray cameraMatrix, InputArray distCoeffs,
   882                                   OutputArray imagePoints,
   883                                   OutputArray jacobian = noArray(),
   884                                   double aspectRatio = 0 );
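
        /* A minimal usage sketch (illustrative values), also requesting the Jacobian:
        @code{.cpp}
            std::vector<cv::Point3f> obj = { {0, 0, 0}, {0.1f, 0, 0}, {0, 0.1f, 0} };
            cv::Matx33d A(800, 0, 320, 0, 800, 240, 0, 0, 1);
            cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.28, 0.07, 1e-3, -2e-4, 0);
            std::vector<cv::Point2f> img;
            cv::Mat jac;
            cv::projectPoints(obj, cv::Vec3d(0, 0, 0), cv::Vec3d(0, 0, 1), A, dist, img, jac);
            // jac is 2N x (10 + 5): derivatives w.r.t. rvec, tvec, f, c and distCoeffs
        @endcode
        */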
   885  
   886  /** @example samples/cpp/tutorial_code/features2D/Homography/homography_from_camera_displacement.cpp
   887  An example program about homography from the camera displacement
   888  
   889  Check @ref tutorial_homography "the corresponding tutorial" for more details
   890  */
   891  
   892  /** @brief Finds an object pose from 3D-2D point correspondences.
   893  This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
   894  coordinate frame to the camera coordinate frame, using different methods:
   895  - P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution.
   896  - @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
   897  - @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
   898  Number of input points must be 4. Object points must be defined in the following order:
   899    - point 0: [-squareLength / 2,  squareLength / 2, 0]
   900    - point 1: [ squareLength / 2,  squareLength / 2, 0]
   901    - point 2: [ squareLength / 2, -squareLength / 2, 0]
   902    - point 3: [-squareLength / 2, -squareLength / 2, 0]
   903  - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
   904  
   905  @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
   906  1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
   907  @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
   908  where N is the number of points. vector\<Point2d\> can be also passed here.
   909  @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
   910  @param distCoeffs Input vector of distortion coefficients
   911  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
   912  assumed.
   913  @param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
   914  the model coordinate system to the camera coordinate system.
   915  @param tvec Output translation vector.
   916  @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
   917  the provided rvec and tvec values as initial approximations of the rotation and translation
   918  vectors, respectively, and further optimizes them.
   919  @param flags Method for solving a PnP problem:
   920  -   @ref SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
   921  this case the function finds such a pose that minimizes reprojection error, that is the sum
   922  of squared distances between the observed projections imagePoints and the projected (using
   923  @ref projectPoints ) objectPoints .
   924  -   @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
   925  "Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
   926  In this case the function requires exactly four object and image points.
   927  -   @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
   928  "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
   929  In this case the function requires exactly four object and image points.
   930  -   @ref SOLVEPNP_EPNP Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
   931  paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
   932  -   @ref SOLVEPNP_DLS **Broken implementation. Using this flag will fall back to EPnP.** \n
   933  Method is based on the paper of J. Hesch and S. Roumeliotis.
   934  "A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
   935  -   @ref SOLVEPNP_UPNP **Broken implementation. Using this flag will fall back to EPnP.** \n
   936  Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
   937  F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
   938  Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
   939  assuming that both have the same value. Then the cameraMatrix is updated with the estimated
   940  focal length.
   941  -   @ref SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
   942  "Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points.
   943  -   @ref SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
   944  "Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation.
   945  It requires 4 coplanar object points defined in the following order:
   946    - point 0: [-squareLength / 2,  squareLength / 2, 0]
   947    - point 1: [ squareLength / 2,  squareLength / 2, 0]
   948    - point 2: [ squareLength / 2, -squareLength / 2, 0]
   949    - point 3: [-squareLength / 2, -squareLength / 2, 0]
   950  -   @ref SOLVEPNP_SQPNP Method is based on the paper "A Consistently Fast and Globally Optimal Solution to the
   951  Perspective-n-Point Problem" by G. Terzakis and M. Lourakis (@cite Terzakis20). It requires 3 or more points.
   952  
   953  
   954  The function estimates the object pose given a set of object points, their corresponding image
   955  projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
   956  (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
   957  and the Z-axis forward).
   958  
   959  ![](pnp.jpg)
   960  
   961  Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$
   962  using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:
   963  
   964  \f[
   965    \begin{align*}
   966    \begin{bmatrix}
   967    u \\
   968    v \\
   969    1
   970    \end{bmatrix} &=
   971    \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
   972    \begin{bmatrix}
   973    X_{w} \\
   974    Y_{w} \\
   975    Z_{w} \\
   976    1
   977    \end{bmatrix} \\
   978    \begin{bmatrix}
   979    u \\
   980    v \\
   981    1
   982    \end{bmatrix} &=
   983    \begin{bmatrix}
   984    f_x & 0 & c_x \\
   985    0 & f_y & c_y \\
   986    0 & 0 & 1
   987    \end{bmatrix}
   988    \begin{bmatrix}
   989    1 & 0 & 0 & 0 \\
   990    0 & 1 & 0 & 0 \\
   991    0 & 0 & 1 & 0
   992    \end{bmatrix}
   993    \begin{bmatrix}
   994    r_{11} & r_{12} & r_{13} & t_x \\
   995    r_{21} & r_{22} & r_{23} & t_y \\
   996    r_{31} & r_{32} & r_{33} & t_z \\
   997    0 & 0 & 0 & 1
   998    \end{bmatrix}
   999    \begin{bmatrix}
  1000    X_{w} \\
  1001    Y_{w} \\
  1002    Z_{w} \\
  1003    1
  1004    \end{bmatrix}
  1005    \end{align*}
  1006  \f]
  1007  
  1008  The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming
  1009  a 3D point expressed in the world frame into the camera frame:
  1010  
  1011  \f[
  1012    \begin{align*}
  1013    \begin{bmatrix}
  1014    X_c \\
  1015    Y_c \\
  1016    Z_c \\
  1017    1
  1018    \end{bmatrix} &=
  1019    \hspace{0.2em} ^{c}\bf{T}_w
  1020    \begin{bmatrix}
  1021    X_{w} \\
  1022    Y_{w} \\
  1023    Z_{w} \\
  1024    1
  1025    \end{bmatrix} \\
  1026    \begin{bmatrix}
  1027    X_c \\
  1028    Y_c \\
  1029    Z_c \\
  1030    1
  1031    \end{bmatrix} &=
  1032    \begin{bmatrix}
  1033    r_{11} & r_{12} & r_{13} & t_x \\
  1034    r_{21} & r_{22} & r_{23} & t_y \\
  1035    r_{31} & r_{32} & r_{33} & t_z \\
  1036    0 & 0 & 0 & 1
  1037    \end{bmatrix}
  1038    \begin{bmatrix}
  1039    X_{w} \\
  1040    Y_{w} \\
  1041    Z_{w} \\
  1042    1
  1043    \end{bmatrix}
  1044    \end{align*}
  1045  \f]
  1046  
  1047  @note
  1048     -   An example of how to use solvePnP for planar augmented reality can be found at
  1049          opencv_source_code/samples/python/plane_ar.py
  1050     -   If you are using Python:
  1051          - Numpy array slices won't work as input because solvePnP requires contiguous
  1052          arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  1053          modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1054          - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  1055          to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1056          which requires 2-channel information.
  1057          - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  1058          it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  1059          np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  1060     -   The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are
  1061         unstable and sometimes give completely wrong results. If you pass one of these two
  1062         flags, @ref SOLVEPNP_EPNP method will be used instead.
  1063     -   The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P
  1064         methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  1065         of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  1066     -   With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  1067         are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  1068         global solution to converge.
  1069     -   With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
   -   @ref SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
  1071         Number of input points must be 4. Object points must be defined in the following order:
  1072           - point 0: [-squareLength / 2,  squareLength / 2, 0]
  1073           - point 1: [ squareLength / 2,  squareLength / 2, 0]
  1074           - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1075           - point 3: [-squareLength / 2, -squareLength / 2, 0]
   -   With @ref SOLVEPNP_SQPNP input points must be >= 3.
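
A minimal usage sketch; the intrinsic parameters and point correspondences below are
illustrative placeholders, not measured data:
@code
    // Four known 3D points on a planar object and their observed projections.
    vector<Point3f> objectPoints = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} };
    vector<Point2f> imagePoints  = { {320,240}, {400,240}, {400,320}, {320,320} };

    Mat cameraMatrix = (Mat_<double>(3,3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    Mat distCoeffs; // empty: zero distortion assumed

    Mat rvec, tvec;
    bool ok = solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
@endcode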
  1077   */
  1078  CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
  1079                              InputArray cameraMatrix, InputArray distCoeffs,
  1080                              OutputArray rvec, OutputArray tvec,
  1081                              bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE );
  1082  
  1083  /** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
  1084  
  1085  @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
  1086  1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
  1087  @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
  1088  where N is the number of points. vector\<Point2d\> can be also passed here.
  1089  @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  1090  @param distCoeffs Input vector of distortion coefficients
  1091  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
  1092  assumed.
  1093  @param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
  1094  the model coordinate system to the camera coordinate system.
  1095  @param tvec Output translation vector.
  1096  @param useExtrinsicGuess Parameter used for @ref SOLVEPNP_ITERATIVE. If true (1), the function uses
  1097  the provided rvec and tvec values as initial approximations of the rotation and translation
  1098  vectors, respectively, and further optimizes them.
  1099  @param iterationsCount Number of iterations.
  1100  @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value
  1101  is the maximum allowed distance between the observed and computed point projections to consider it
  1102  an inlier.
  1103  @param confidence The probability that the algorithm produces a useful result.
  1104  @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
  1105  @param flags Method for solving a PnP problem (see @ref solvePnP ).
  1106  
  1107  The function estimates an object pose given a set of object points, their corresponding image
  1108  projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such
  1109  a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
  1110  projections imagePoints and the projected (using @ref projectPoints ) objectPoints. The use of RANSAC
  1111  makes the function resistant to outliers.
  1112  
  1113  @note
   -   An example of how to use solvePnPRansac for object detection can be found at
  1115          opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/
  1116     -   The default method used to estimate the camera pose for the Minimal Sample Sets step
  1117         is #SOLVEPNP_EPNP. Exceptions are:
  1118           - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used.
  1119           - if the number of input points is equal to 4, #SOLVEPNP_P3P is used.
  1120     -   The method used to estimate the camera pose using all the inliers is defined by the
  1121         flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case,
  1122         the method #SOLVEPNP_EPNP will be used instead.
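
A usage sketch, assuming objectPoints and imagePoints hold possibly outlier-contaminated
correspondences and cameraMatrix/distCoeffs come from a prior calibration:
@code
    Mat rvec, tvec, inliers;
    // The defaults (useExtrinsicGuess=false, iterationsCount=100,
    // reprojectionError=8.0, confidence=0.99) are spelled out for clarity.
    bool ok = solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                             rvec, tvec, false, 100, 8.0f, 0.99, inliers);
    // inliers now lists the indices of the correspondences kept by RANSAC.
@endcode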
  1123   */
  1124  CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
  1125                                    InputArray cameraMatrix, InputArray distCoeffs,
  1126                                    OutputArray rvec, OutputArray tvec,
  1127                                    bool useExtrinsicGuess = false, int iterationsCount = 100,
  1128                                    float reprojectionError = 8.0, double confidence = 0.99,
  1129                                    OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
  1130  
  1131  
/*
Finds the rotation and translation vectors.
If cameraMatrix is given, P3P is run. Otherwise, linear P6P is run and cameraMatrix is also output.
*/
  1136  CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
  1137                       InputOutputArray cameraMatrix, InputArray distCoeffs,
  1138                       OutputArray rvec, OutputArray tvec, OutputArray inliers,
  1139                       const UsacParams &params=UsacParams());
  1140  
  1141  /** @brief Finds an object pose from 3 3D-2D point correspondences.
  1142  
  1143  @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
  1144  1x3/3x1 3-channel. vector\<Point3f\> can be also passed here.
  1145  @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
  1146   vector\<Point2f\> can be also passed here.
  1147  @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  1148  @param distCoeffs Input vector of distortion coefficients
  1149  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
  1150  assumed.
@param rvecs Output rotation vectors (see @ref Rodrigues ) that, together with tvecs, bring points from
  1152  the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
  1153  @param tvecs Output translation vectors.
  1154  @param flags Method for solving a P3P problem:
  1155  -   @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
  1156  "Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
  1157  -   @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis.
  1158  "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
  1159  
  1160  The function estimates the object pose given 3 object points, their corresponding image
  1161  projections, as well as the camera intrinsic matrix and the distortion coefficients.
  1162  
  1163  @note
  1164  The solutions are sorted by reprojection errors (lowest to highest).
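
A usage sketch (cameraMatrix and distCoeffs as for @ref solvePnP; the point values are
illustrative):
@code
    // Exactly 3 correspondences are expected here.
    vector<Point3f> objectPoints = { {0,0,0}, {1,0,0}, {0,1,0} };
    vector<Point2f> imagePoints  = { {320,240}, {400,240}, {320,320} };

    vector<Mat> rvecs, tvecs;
    int n = solveP3P(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                     rvecs, tvecs, SOLVEPNP_P3P);
    // n solutions (up to 4) are returned, sorted by reprojection error.
@endcode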
  1165   */
  1166  CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints,
  1167                             InputArray cameraMatrix, InputArray distCoeffs,
  1168                             OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  1169                             int flags );
  1170  
  1171  /** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
  1172  to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
  1173  
  1174  @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
  1175  where N is the number of points. vector\<Point3d\> can also be passed here.
  1176  @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
  1177  where N is the number of points. vector\<Point2d\> can also be passed here.
  1178  @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  1179  @param distCoeffs Input vector of distortion coefficients
  1180  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
  1181  assumed.
  1182  @param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
  1183  the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
  1184  @param tvec Input/Output translation vector. Input values are used as an initial solution.
@param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
  1186  
  1187  The function refines the object pose given at least 3 object points, their corresponding image
  1188  projections, an initial solution for the rotation and translation vector,
  1189  as well as the camera intrinsic matrix and the distortion coefficients.
  1190  The function minimizes the projection error with respect to the rotation and the translation vectors, according
  1191  to a Levenberg-Marquardt iterative minimization @cite Madsen04 @cite Eade13 process.
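
A sketch of the typical refinement pattern, assuming the correspondences and intrinsics
are prepared as for @ref solvePnP:
@code
    Mat rvec, tvec;
    solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, SOLVEPNP_EPNP);
    // rvec/tvec are read as the initial solution and overwritten with the refined pose.
    solvePnPRefineLM(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
@endcode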
  1192   */
  1193  CV_EXPORTS_W void solvePnPRefineLM( InputArray objectPoints, InputArray imagePoints,
  1194                                      InputArray cameraMatrix, InputArray distCoeffs,
  1195                                      InputOutputArray rvec, InputOutputArray tvec,
  1196                                      TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON));
  1197  
  1198  /** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame
  1199  to the camera coordinate frame) from a 3D-2D point correspondences and starting from an initial solution.
  1200  
  1201  @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel,
  1202  where N is the number of points. vector\<Point3d\> can also be passed here.
  1203  @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
  1204  where N is the number of points. vector\<Point2d\> can also be passed here.
  1205  @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  1206  @param distCoeffs Input vector of distortion coefficients
  1207  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
  1208  assumed.
  1209  @param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
  1210  the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
  1211  @param tvec Input/Output translation vector. Input values are used as an initial solution.
@param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
  1213  @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \f$\alpha\f$
  1214  gain in the Damped Gauss-Newton formulation.
  1215  
  1216  The function refines the object pose given at least 3 object points, their corresponding image
  1217  projections, an initial solution for the rotation and translation vector,
  1218  as well as the camera intrinsic matrix and the distortion coefficients.
  1219  The function minimizes the projection error with respect to the rotation and the translation vectors, using a
  1220  virtual visual servoing (VVS) @cite Chaumette06 @cite Marchand16 scheme.
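
Usage mirrors @ref solvePnPRefineLM; a sketch with a damped gain (the value 0.5 is an
illustrative choice):
@code
    // A VVSlambda below 1 damps each virtual visual servoing update step.
    solvePnPRefineVVS(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
                      TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON),
                      0.5);
@endcode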
  1221   */
  1222  CV_EXPORTS_W void solvePnPRefineVVS( InputArray objectPoints, InputArray imagePoints,
  1223                                       InputArray cameraMatrix, InputArray distCoeffs,
  1224                                       InputOutputArray rvec, InputOutputArray tvec,
  1225                                       TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON),
  1226                                       double VVSlambda = 1);
  1227  
  1228  /** @brief Finds an object pose from 3D-2D point correspondences.
  1229  This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
  1230  couple), depending on the number of input points and the chosen method:
  1231  - P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
  1232  - @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
  1233  - @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
  1234  Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
  1235    - point 0: [-squareLength / 2,  squareLength / 2, 0]
  1236    - point 1: [ squareLength / 2,  squareLength / 2, 0]
  1237    - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1238    - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1239  - for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
  1240  Only 1 solution is returned.
  1241  
  1242  @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
  1243  1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here.
  1244  @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
  1245  where N is the number of points. vector\<Point2d\> can be also passed here.
  1246  @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  1247  @param distCoeffs Input vector of distortion coefficients
  1248  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
  1249  assumed.
@param rvecs Vector of output rotation vectors (see @ref Rodrigues ) that, together with tvecs, bring points from
  1251  the model coordinate system to the camera coordinate system.
  1252  @param tvecs Vector of output translation vectors.
  1253  @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
  1254  the provided rvec and tvec values as initial approximations of the rotation and translation
  1255  vectors, respectively, and further optimizes them.
  1256  @param flags Method for solving a PnP problem:
  1257  -   @ref SOLVEPNP_ITERATIVE Iterative method is based on a Levenberg-Marquardt optimization. In
  1258  this case the function finds such a pose that minimizes reprojection error, that is the sum
  1259  of squared distances between the observed projections imagePoints and the projected (using
  1260   #projectPoints ) objectPoints .
  1261  -   @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
  1262  "Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
  1263  In this case the function requires exactly four object and image points.
  1264  -   @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke, S. Roumeliotis
  1265  "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
  1266  In this case the function requires exactly four object and image points.
  1267  -   @ref SOLVEPNP_EPNP Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
  1268  paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
-   @ref SOLVEPNP_DLS **Broken implementation. Using this flag will fall back to EPnP.** \n
  1270  Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
  1271  "A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
-   @ref SOLVEPNP_UPNP **Broken implementation. Using this flag will fall back to EPnP.** \n
  1273  Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
  1274  F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
  1275  Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
  1276  assuming that both have the same value. Then the cameraMatrix is updated with the estimated
  1277  focal length.
  1278  -   @ref SOLVEPNP_IPPE Method is based on the paper of T. Collins and A. Bartoli.
  1279  "Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points.
  1280  -   @ref SOLVEPNP_IPPE_SQUARE Method is based on the paper of Toby Collins and Adrien Bartoli.
  1281  "Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation.
  1282  It requires 4 coplanar object points defined in the following order:
  1283    - point 0: [-squareLength / 2,  squareLength / 2, 0]
  1284    - point 1: [ squareLength / 2,  squareLength / 2, 0]
  1285    - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1286    - point 3: [-squareLength / 2, -squareLength / 2, 0]
  1287  @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE
  1288  and useExtrinsicGuess is set to true.
  1289  @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE
  1290  and useExtrinsicGuess is set to true.
  1291  @param reprojectionError Optional vector of reprojection error, that is the RMS error
  1292  (\f$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \f$) between the input image points
  1293  and the 3D object points projected with the estimated pose.
  1294  
  1295  The function estimates the object pose given a set of object points, their corresponding image
  1296  projections, as well as the camera intrinsic matrix and the distortion coefficients, see the figure below
  1297  (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
  1298  and the Z-axis forward).
  1299  
  1300  ![](pnp.jpg)
  1301  
  1302  Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$
  1303  using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:
  1304  
  1305  \f[
  1306    \begin{align*}
  1307    \begin{bmatrix}
  1308    u \\
  1309    v \\
  1310    1
  1311    \end{bmatrix} &=
  1312    \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{T}_w
  1313    \begin{bmatrix}
  1314    X_{w} \\
  1315    Y_{w} \\
  1316    Z_{w} \\
  1317    1
  1318    \end{bmatrix} \\
  1319    \begin{bmatrix}
  1320    u \\
  1321    v \\
  1322    1
  1323    \end{bmatrix} &=
  1324    \begin{bmatrix}
  1325    f_x & 0 & c_x \\
  1326    0 & f_y & c_y \\
  1327    0 & 0 & 1
  1328    \end{bmatrix}
  1329    \begin{bmatrix}
  1330    1 & 0 & 0 & 0 \\
  1331    0 & 1 & 0 & 0 \\
  1332    0 & 0 & 1 & 0
  1333    \end{bmatrix}
  1334    \begin{bmatrix}
  1335    r_{11} & r_{12} & r_{13} & t_x \\
  1336    r_{21} & r_{22} & r_{23} & t_y \\
  1337    r_{31} & r_{32} & r_{33} & t_z \\
  1338    0 & 0 & 0 & 1
  1339    \end{bmatrix}
  1340    \begin{bmatrix}
  1341    X_{w} \\
  1342    Y_{w} \\
  1343    Z_{w} \\
  1344    1
  1345    \end{bmatrix}
  1346    \end{align*}
  1347  \f]
  1348  
  1349  The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming
  1350  a 3D point expressed in the world frame into the camera frame:
  1351  
  1352  \f[
  1353    \begin{align*}
  1354    \begin{bmatrix}
  1355    X_c \\
  1356    Y_c \\
  1357    Z_c \\
  1358    1
  1359    \end{bmatrix} &=
  1360    \hspace{0.2em} ^{c}\bf{T}_w
  1361    \begin{bmatrix}
  1362    X_{w} \\
  1363    Y_{w} \\
  1364    Z_{w} \\
  1365    1
  1366    \end{bmatrix} \\
  1367    \begin{bmatrix}
  1368    X_c \\
  1369    Y_c \\
  1370    Z_c \\
  1371    1
  1372    \end{bmatrix} &=
  1373    \begin{bmatrix}
  1374    r_{11} & r_{12} & r_{13} & t_x \\
  1375    r_{21} & r_{22} & r_{23} & t_y \\
  1376    r_{31} & r_{32} & r_{33} & t_z \\
  1377    0 & 0 & 0 & 1
  1378    \end{bmatrix}
  1379    \begin{bmatrix}
  1380    X_{w} \\
  1381    Y_{w} \\
  1382    Z_{w} \\
  1383    1
  1384    \end{bmatrix}
  1385    \end{align*}
  1386  \f]
  1387  
  1388  @note
  1389     -   An example of how to use solvePnP for planar augmented reality can be found at
  1390          opencv_source_code/samples/python/plane_ar.py
  1391     -   If you are using Python:
  1392          - Numpy array slices won't work as input because solvePnP requires contiguous
  1393          arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
  1394          modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1395          - The P3P algorithm requires image points to be in an array of shape (N,1,2) due
  1396          to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
  1397          which requires 2-channel information.
  1398          - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
  1399          it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
  1400          np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
  1401     -   The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are
  1402         unstable and sometimes give completely wrong results. If you pass one of these two
  1403         flags, @ref SOLVEPNP_EPNP method will be used instead.
  1404     -   The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P
  1405         methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
  1406         of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
  1407     -   With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
  1408         are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
  1409         global solution to converge.
  1410     -   With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar.
   -   @ref SOLVEPNP_IPPE_SQUARE is a special case suitable for marker pose estimation.
  1412         Number of input points must be 4. Object points must be defined in the following order:
  1413           - point 0: [-squareLength / 2,  squareLength / 2, 0]
  1414           - point 1: [ squareLength / 2,  squareLength / 2, 0]
  1415           - point 2: [ squareLength / 2, -squareLength / 2, 0]
  1416           - point 3: [-squareLength / 2, -squareLength / 2, 0]
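
A sketch retrieving both @ref SOLVEPNP_IPPE solutions for a coplanar target, assuming the
correspondences and intrinsics are prepared as for @ref solvePnP:
@code
    vector<Mat> rvecs, tvecs;
    Mat reprojErrors;
    int n = solvePnPGeneric(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                            rvecs, tvecs, false, SOLVEPNP_IPPE,
                            noArray(), noArray(), reprojErrors);
    // For coplanar object points, SOLVEPNP_IPPE returns two candidate poses;
    // reprojErrors holds the RMSE of each candidate.
@endcode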
  1417   */
  1418  CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoints,
  1419                                    InputArray cameraMatrix, InputArray distCoeffs,
  1420                                    OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  1421                                    bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE,
  1422                                    InputArray rvec = noArray(), InputArray tvec = noArray(),
  1423                                    OutputArray reprojectionError = noArray() );
  1424  
  1425  /** @brief Finds an initial camera intrinsic matrix from 3D-2D point correspondences.
  1426  
  1427  @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
  1428  coordinate space. In the old interface all the per-view vectors are concatenated. See
  1429  #calibrateCamera for details.
  1430  @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the
  1431  old interface all the per-view vectors are concatenated.
  1432  @param imageSize Image size in pixels used to initialize the principal point.
  1433  @param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently.
  1434  Otherwise, \f$f_x = f_y * \texttt{aspectRatio}\f$ .
  1435  
  1436  The function estimates and returns an initial camera intrinsic matrix for the camera calibration process.
Currently, the function only supports planar calibration patterns, i.e. patterns where every
object point has a z-coordinate of 0.
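
A sketch, assuming per-view point vectors collected as for #calibrateCamera (the image size
is illustrative):
@code
    vector<vector<Point3f>> objectPoints; // one vector of planar (Z = 0) pattern points per view
    vector<vector<Point2f>> imagePoints;  // the corresponding detections per view
    // ... fill both from pattern detections ...
    Mat K = initCameraMatrix2D(objectPoints, imagePoints, Size(640, 480));
@endcode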
  1439   */
  1440  CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints,
  1441                                       InputArrayOfArrays imagePoints,
  1442                                       Size imageSize, double aspectRatio = 1.0 );
  1443  
  1444  /** @brief Finds the positions of internal corners of the chessboard.
  1445  
  1446  @param image Source chessboard view. It must be an 8-bit grayscale or color image.
@param patternSize Number of inner corners per chessboard row and column
( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
  1449  @param corners Output array of detected corners.
  1450  @param flags Various operation flags that can be zero or a combination of the following values:
  1451  -   @ref CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black
  1452  and white, rather than a fixed threshold level (computed from the average image brightness).
  1453  -   @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before
  1454  applying fixed or adaptive thresholding.
  1455  -   @ref CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter,
  1456  square-like shape) to filter out false quads extracted at the contour retrieval stage.
  1457  -   @ref CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners,
  1458  and shortcut the call if none is found. This can drastically speed up the call in the
  1459  degenerate condition when no chessboard is observed.
  1460  
  1461  The function attempts to determine whether the input image is a view of the chessboard pattern and
  1462  locate the internal chessboard corners. The function returns a non-zero value if all of the corners
  1463  are found and they are placed in a certain order (row by row, left to right in every row).
  1464  Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example,
  1465  a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black
  1466  squares touch each other. The detected coordinates are approximate, and to determine their positions
more accurately, the function calls cornerSubPix. You may also use the function cornerSubPix with
different parameters if the returned coordinates are not accurate enough.
  1469  
Sample usage of detecting and drawing chessboard corners:
  1471  @code
  1472      Size patternsize(8,6); //interior number of corners
  1473      Mat gray = ....; //source image
  1474      vector<Point2f> corners; //this will be filled by the detected corners
  1475  
  1476      //CALIB_CB_FAST_CHECK saves a lot of time on images
  1477      //that do not contain any chessboard corners
  1478      bool patternfound = findChessboardCorners(gray, patternsize, corners,
  1479              CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
  1480              + CALIB_CB_FAST_CHECK);
  1481  
  1482      if(patternfound)
  1483        cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
        TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
  1485  
  1486      drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
  1487  @endcode
  1488  @note The function requires white space (like a square-thick border, the wider the better) around
  1489  the board to make the detection more robust in various environments. Otherwise, if there is no
  1490  border and the background is dark, the outer black squares cannot be segmented properly and so the
  1491  square grouping and ordering algorithm fails.
  1492   */
  1493  CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners,
  1494                                           int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE );
  1495  
/*
   Checks whether the image contains a chessboard of the specified size.
   If so, a nonzero value is returned.
*/
  1500  CV_EXPORTS_W bool checkChessboard(InputArray img, Size size);
  1501  
/** @brief Finds the positions of internal corners of the chessboard using a sector-based approach.
  1503  
  1504  @param image Source chessboard view. It must be an 8-bit grayscale or color image.
@param patternSize Number of inner corners per chessboard row and column
( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
  1507  @param corners Output array of detected corners.
  1508  @param flags Various operation flags that can be zero or a combination of the following values:
  1509  -   @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection.
  1510  -   @ref CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate.
-   @ref CALIB_CB_ACCURACY Upsample the input image to improve sub-pixel accuracy due to aliasing effects.
  1512  -   @ref CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description).
  1513  -   @ref CALIB_CB_MARKER The detected pattern must have a marker (see description).
  1514  This should be used if an accurate camera calibration is required.
@param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
  1516  Each entry stands for one corner of the pattern and can have one of the following values:
  1517  -   0 = no meta data attached
  1518  -   1 = left-top corner of a black cell
  1519  -   2 = left-top corner of a white cell
  1520  -   3 = left-top corner of a black cell with a white marker dot
-   4 = left-top corner of a white cell with a black marker dot (the pattern origin if markers are used; otherwise, the first corner)
  1522  
The function is analogous to #findChessboardCorners but uses a localized radon
transformation approximated by box filters, which is more robust to all sorts of
noise and faster on larger images, and is able to directly return the sub-pixel
positions of the internal chessboard corners. The method is based on the paper
@cite duda2018 "Accurate Detection and Localization of Checkerboard Corners for
Calibration" demonstrating that the returned sub-pixel positions are more
accurate than the ones returned by cornerSubPix, allowing precise camera
calibration for demanding applications.
  1531  
If the flags @ref CALIB_CB_LARGER or @ref CALIB_CB_MARKER are given,
the result can be recovered from the optional meta array. Both flags are
helpful when calibration patterns exceed the field of view of the camera.
Such oversized patterns allow more accurate calibrations, as corners that are
as close as possible to the image borders can be utilized. For a
consistent coordinate system across all images, the optional marker (see image
below) can be used to move the origin of the board to the location of the
black circle.
  1540  
@note The function requires a white border with roughly the same width as one
of the checkerboard fields around the whole board to improve the detection in
various environments. In addition, because of the localized radon
transformation, it is beneficial to use round corners for the field corners
located on the outside of the board. The following figure illustrates
a sample checkerboard optimized for the detection. However, any other checkerboard
can be used as well.
  1548  ![Checkerboard](pics/checkerboard_radon.png)
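
Usage parallels #findChessboardCorners; a sketch requesting the marker-defined origin
(the pattern size is illustrative, and `gray` is assumed to hold the 8-bit view):
@code
    vector<Point2f> corners;
    Mat meta;
    bool found = findChessboardCornersSB(gray, Size(8, 6), corners,
                                         CALIB_CB_NORMALIZE_IMAGE | CALIB_CB_MARKER, meta);
@endcode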
  1549   */
  1550  CV_EXPORTS_AS(findChessboardCornersSBWithMeta)
  1551  bool findChessboardCornersSB(InputArray image,Size patternSize, OutputArray corners,
  1552                               int flags,OutputArray meta);
  1553  /** @overload */
  1554  CV_EXPORTS_W inline
  1555  bool findChessboardCornersSB(InputArray image, Size patternSize, OutputArray corners,
  1556                               int flags = 0)
  1557  {
  1558      return findChessboardCornersSB(image, patternSize, corners, flags, noArray());
  1559  }
  1560  
  1561  /** @brief Estimates the sharpness of a detected chessboard.
  1562  
Image sharpness, as well as brightness, is a critical parameter for accurate
camera calibration. To access these parameters for filtering out
problematic calibration images, this method calculates edge profiles by traveling from
black to white chessboard cell centers. Based on this, the number of pixels
required to transition from black to white is calculated. This width of the
transition area is a good indication of how sharply the chessboard is imaged
and should be below ~3.0 pixels.
  1570  
  1571  @param image Gray image used to find chessboard corners
  1572  @param patternSize Size of a found chessboard pattern
  1573  @param corners Corners found by #findChessboardCornersSB
@param rise_distance Rise distance; 0.8 means the transition from 10% to 90% of the final signal strength is measured
@param vertical By default, edge responses for horizontal lines are calculated
  1576  @param sharpness Optional output array with a sharpness value for calculated edge responses (see description)
  1577  
  1578  The optional sharpness array is of type CV_32FC1 and has for each calculated
  1579  profile one row with the following five entries:
  1580  * 0 = x coordinate of the underlying edge in the image
  1581  * 1 = y coordinate of the underlying edge in the image
  1582  * 2 = width of the transition area (sharpness)
  1583  * 3 = signal strength in the black cell (min brightness)
  1584  * 4 = signal strength in the white cell (max brightness)
  1585  
  1586  @return Scalar(average sharpness, average min brightness, average max brightness,0)
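
A sketch filtering out blurry views, assuming `gray` holds the 8-bit image; the 3.0 pixel
threshold follows the guideline above and the pattern size is illustrative:
@code
    Size patternSize(9, 6);
    vector<Point2f> corners;
    if (findChessboardCornersSB(gray, patternSize, corners))
    {
        Scalar s = estimateChessboardSharpness(gray, patternSize, corners);
        if (s[0] > 3.0) // average transition width in pixels
        {
            // likely too blurry; consider skipping this view for calibration
        }
    }
@endcode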
  1587  */
  1588  CV_EXPORTS_W Scalar estimateChessboardSharpness(InputArray image, Size patternSize, InputArray corners,
  1589                                                  float rise_distance=0.8F,bool vertical=false,
  1590                                                  OutputArray sharpness=noArray());
  1591  
  1592  
  1593  //! finds subpixel-accurate positions of the chessboard corners
  1594  CV_EXPORTS_W bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size );
  1595  
  1596  /** @brief Renders the detected chessboard corners.
  1597  
  1598  @param image Destination image. It must be an 8-bit color image.
@param patternSize Number of inner corners per chessboard row and column
  1600  (patternSize = cv::Size(points_per_row,points_per_column)).
  1601  @param corners Array of detected corners, the output of #findChessboardCorners.
  1602  @param patternWasFound Parameter indicating whether the complete board was found or not. The
  1603  return value of #findChessboardCorners should be passed here.
  1604  
  1605  The function draws individual chessboard corners detected either as red circles if the board was not
  1606  found, or as colored corners connected with lines if the board was found.
  1607   */
  1608  CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
  1609                                           InputArray corners, bool patternWasFound );
  1610  
  1611  /** @brief Draw axes of the world/object coordinate system from pose estimation. @sa solvePnP
  1612  
  1613  @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered.
  1614  @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters.
  1615  \f$\cameramatrix{A}\f$
  1616  @param distCoeffs Input vector of distortion coefficients
  1617  \f$\distcoeffs\f$. If the vector is empty, the zero distortion coefficients are assumed.
  1618  @param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
  1619  the model coordinate system to the camera coordinate system.
  1620  @param tvec Translation vector.
@param length Length of the painted axes in the same unit as tvec (usually in meters).
  1622  @param thickness Line thickness of the painted axes.
  1623  
This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
  1625  OX is drawn in red, OY in green and OZ in blue.
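
A sketch overlaying an estimated pose, assuming the correspondences and intrinsics are
prepared as for @ref solvePnP (the axis length of 0.1 object units is illustrative):
@code
    Mat rvec, tvec;
    solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
    drawFrameAxes(img, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);
@endcode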
  1626   */
  1627  CV_EXPORTS_W void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
  1628                                  InputArray rvec, InputArray tvec, float length, int thickness=3);
  1629  
  1630  struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters
  1631  {
  1632      CV_WRAP CirclesGridFinderParameters();
  1633      CV_PROP_RW cv::Size2f densityNeighborhoodSize;
  1634      CV_PROP_RW float minDensity;
  1635      CV_PROP_RW int kmeansAttempts;
  1636      CV_PROP_RW int minDistanceToAddKeypoint;
  1637      CV_PROP_RW int keypointScale;
  1638      CV_PROP_RW float minGraphConfidence;
  1639      CV_PROP_RW float vertexGain;
  1640      CV_PROP_RW float vertexPenalty;
  1641      CV_PROP_RW float existingVertexGain;
  1642      CV_PROP_RW float edgeGain;
  1643      CV_PROP_RW float edgePenalty;
  1644      CV_PROP_RW float convexHullFactor;
  1645      CV_PROP_RW float minRNGEdgeSwitchDist;
  1646  
  1647      enum GridType
  1648      {
  1649        SYMMETRIC_GRID, ASYMMETRIC_GRID
  1650      };
  1651      GridType gridType;
  1652  
  1653      CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING.
  1654      CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING.
  1655  };
  1656  
  1657  #ifndef DISABLE_OPENCV_3_COMPATIBILITY
  1658  typedef CirclesGridFinderParameters CirclesGridFinderParameters2;
  1659  #endif
  1660  
  1661  /** @brief Finds centers in the grid of circles.
  1662  
  1663  @param image grid view of input circles; it must be an 8-bit grayscale or color image.
  1664  @param patternSize number of circles per row and column
( patternSize = Size(points_per_row, points_per_column) ).
  1666  @param centers output array of detected centers.
  1667  @param flags various operation flags that can be one of the following values:
  1668  -   @ref CALIB_CB_SYMMETRIC_GRID uses symmetric pattern of circles.
  1669  -   @ref CALIB_CB_ASYMMETRIC_GRID uses asymmetric pattern of circles.
  1670  -   @ref CALIB_CB_CLUSTERING uses a special algorithm for grid detection. It is more robust to
  1671  perspective distortions but much more sensitive to background clutter.
  1672  @param blobDetector feature detector that finds blobs like dark circles on light background.
                    If `blobDetector` is NULL then `image` represents a Point2f array of candidates.
  1674  @param parameters struct for finding circles in a grid pattern.
  1675  
  1676  The function attempts to determine whether the input image contains a grid of circles. If it is, the
  1677  function locates centers of the circles. The function returns a non-zero value if all of the centers
  1678  have been found and they have been placed in a certain order (row by row, left to right in every
  1679  row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0.
  1680  
Sample usage of detecting and drawing the centers of circles:
  1682  @code
  1683      Size patternsize(7,7); //number of centers
  1684      Mat gray = ...; //source image
  1685      vector<Point2f> centers; //this will be filled by the detected centers
  1686  
  1687      bool patternfound = findCirclesGrid(gray, patternsize, centers);
  1688  
  1689      drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
  1690  @endcode
  1691  @note The function requires white space (like a square-thick border, the wider the better) around
  1692  the board to make the detection more robust in various environments.
  1693   */
  1694  CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
  1695                                     OutputArray centers, int flags,
  1696                                     const Ptr<FeatureDetector> &blobDetector,
  1697                                     const CirclesGridFinderParameters& parameters);
  1698  
  1699  /** @overload */
  1700  CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize,
  1701                                     OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID,
  1702                                     const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create());
  1703  
  1704  /** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration
  1705  pattern.
  1706  
  1707  @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in
  1708  the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer
  1709  vector contains as many elements as the number of pattern views. If the same calibration pattern
is shown in each view and it is fully visible, all the vectors will be the same. However, it is
possible to use partially occluded patterns or even different patterns in different views; then
the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's
  1713  XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig.
  1714  In the old interface all the vectors of object points from different views are concatenated
  1715  together.
  1716  @param imagePoints In the new interface it is a vector of vectors of the projections of calibration
  1717  pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and
  1718  objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal,
  1719  respectively. In the old interface all the vectors of object points from different views are
  1720  concatenated together.
  1721  @param imageSize Size of the image used only to initialize the camera intrinsic matrix.
  1722  @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix
  1723  \f$\cameramatrix{A}\f$ . If @ref CALIB_USE_INTRINSIC_GUESS
  1724  and/or @ref CALIB_FIX_ASPECT_RATIO, @ref CALIB_FIX_PRINCIPAL_POINT or @ref CALIB_FIX_FOCAL_LENGTH
  1725  are specified, some or all of fx, fy, cx, cy must be initialized before calling the function.
  1726  @param distCoeffs Input/output vector of distortion coefficients
  1727  \f$\distcoeffs\f$.
  1728  @param rvecs Output vector of rotation vectors (@ref Rodrigues ) estimated for each pattern view
(e.g. std::vector<cv::Mat>). That is, each i-th rotation vector together with the corresponding
  1730  i-th translation vector (see the next output parameter description) brings the calibration pattern
  1731  from the object coordinate space (in which object points are specified) to the camera coordinate
  1732  space. In more technical terms, the tuple of the i-th rotation and translation vector performs
  1733  a change of basis from object coordinate space to camera coordinate space. Due to its duality, this
  1734  tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate
  1735  space.
@param tvecs Output vector of translation vectors estimated for each pattern view, see the parameter
description above.
  1738  @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic
parameters. Order of deviation values:
  1740  \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
 s_4, \tau_x, \tau_y)\f$. If one of the parameters is not estimated, its deviation is equal to zero.
  1742  @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic
parameters. Order of deviation values: \f$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\f$ where M is
the number of pattern views. \f$R_i, T_i\f$ are concatenated 1x3 vectors.
@param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
  1746  @param flags Different flags that may be zero or a combination of the following values:
  1747  -   @ref CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of
  1748  fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
  1749  center ( imageSize is used), and focal distances are computed in a least-squares fashion.
  1750  Note, that if intrinsic parameters are known, there is no need to use this function just to
  1751  estimate extrinsic parameters. Use @ref solvePnP instead.
  1752  -   @ref CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
  1753  optimization. It stays at the center or at a different location specified when
  1754   @ref CALIB_USE_INTRINSIC_GUESS is set too.
  1755  -   @ref CALIB_FIX_ASPECT_RATIO The functions consider only fy as a free parameter. The
  1756  ratio fx/fy stays the same as in the input cameraMatrix . When
  1757   @ref CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
  1758  ignored, only their ratio is computed and used further.
  1759  -   @ref CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
  1760  to zeros and stay zero.
  1761  -   @ref CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if
  1762   @ref CALIB_USE_INTRINSIC_GUESS is set.
  1763  -   @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 The corresponding radial distortion
  1764  coefficient is not changed during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is
  1765  set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
  1766  -   @ref CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the
  1767  backward compatibility, this extra flag should be explicitly specified to make the
  1768  calibration function use the rational model and return 8 coefficients or more.
  1769  -   @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
  1770  backward compatibility, this extra flag should be explicitly specified to make the
  1771  calibration function use the thin prism model and return 12 coefficients or more.
  1772  -   @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
  1773  the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
  1774  supplied distCoeffs matrix is used. Otherwise, it is set to 0.
  1775  -   @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
  1776  backward compatibility, this extra flag should be explicitly specified to make the
  1777  calibration function use the tilted sensor model and return 14 coefficients.
  1778  -   @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
  1779  the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
  1780  supplied distCoeffs matrix is used. Otherwise, it is set to 0.
  1781  @param criteria Termination criteria for the iterative optimization algorithm.
  1782  
  1783  @return the overall RMS re-projection error.
  1784  
  1785  The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
  1786  views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object
  1787  points and their corresponding 2D projections in each view must be specified. That may be achieved
  1788  by using an object with known geometry and easily detectable feature points. Such an object is
  1789  called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as
  1790  a calibration rig (see @ref findChessboardCorners). Currently, initialization of intrinsic
  1791  parameters (when @ref CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration
  1792  patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also
  1793  be used as long as initial cameraMatrix is provided.
  1794  
  1795  The algorithm performs the following steps:
  1796  
  1797  -   Compute the initial intrinsic parameters (the option only available for planar calibration
  1798      patterns) or read them from the input parameters. The distortion coefficients are all set to
  1799      zeros initially unless some of CALIB_FIX_K? are specified.
  1800  
  1801  -   Estimate the initial camera pose as if the intrinsic parameters have been already known. This is
  1802      done using @ref solvePnP .
  1803  
  1804  -   Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error,
  1805      that is, the total sum of squared distances between the observed feature points imagePoints and
  1806      the projected (using the current estimates for camera parameters and the poses) object points
  1807      objectPoints. See @ref projectPoints for details.
  1808  
  1809  @note
  1810      If you use a non-square (i.e. non-N-by-N) grid and @ref findChessboardCorners for calibration,
  1811      and @ref calibrateCamera returns bad values (zero distortion coefficients, \f$c_x\f$ and
  1812      \f$c_y\f$ very far from the image center, and/or large differences between \f$f_x\f$ and
  1813      \f$f_y\f$ (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols)
  1814      instead of using patternSize=cvSize(cols,rows) in @ref findChessboardCorners.
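
A minimal sketch of the simple overload; the image size is illustrative and the per-view
point collection is elided:
@code
    vector<vector<Point3f>> objectPoints; // per view: the same planar grid with Z = 0
    vector<vector<Point2f>> imagePoints;  // per view: corners from findChessboardCorners
    // ... fill both over several views ...
    Mat cameraMatrix, distCoeffs;
    vector<Mat> rvecs, tvecs;
    double rms = calibrateCamera(objectPoints, imagePoints, Size(640, 480),
                                 cameraMatrix, distCoeffs, rvecs, tvecs);
@endcode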
  1815  
  1816  @sa
  1817     calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate,
  1818     undistort
  1819   */
  1820  CV_EXPORTS_AS(calibrateCameraExtended) double calibrateCamera( InputArrayOfArrays objectPoints,
  1821                                       InputArrayOfArrays imagePoints, Size imageSize,
  1822                                       InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  1823                                       OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  1824                                       OutputArray stdDeviationsIntrinsics,
  1825                                       OutputArray stdDeviationsExtrinsics,
  1826                                       OutputArray perViewErrors,
  1827                                       int flags = 0, TermCriteria criteria = TermCriteria(
  1828                                          TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
  1829  
  1830  /** @overload */
  1831  CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints,
  1832                                       InputArrayOfArrays imagePoints, Size imageSize,
  1833                                       InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  1834                                       OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  1835                                       int flags = 0, TermCriteria criteria = TermCriteria(
  1836                                          TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
  1837  
  1838  /** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
  1839  
  1840  This function is an extension of #calibrateCamera with the method of releasing object which was
  1841  proposed in @cite strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar
  1842  targets (calibration plates), this method can dramatically improve the precision of the estimated
  1843  camera parameters. Both the object-releasing method and standard method are supported by this
  1844  function. Use the parameter **iFixedPoint** for method selection. In the internal implementation,
  1845  #calibrateCamera is a wrapper for this function.
  1846  
  1847  @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern
coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
the identical calibration board must be used in each view, it must be fully visible, all
objectPoints[i] must be the same, and all points should lie roughly in a plane. **The calibration
target has to be rigid, or at least static if the camera (rather than the calibration target) is
shifted for grabbing images.**
  1853  @param imagePoints Vector of vectors of the projections of calibration pattern points. See
  1854  #calibrateCamera for details.
  1855  @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
@param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as
a switch for calibration method selection. If the object-releasing method is to be used, pass a
value in the range [1, objectPoints[0].size()-2]; a value out of this range selects the
standard calibration method. Fixing the top-right corner point of the calibration
board grid is usually recommended when the object-releasing method is utilized. According to
\cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front
and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
newObjPoints are only possible if the coordinates of these three fixed points are accurate enough.
  1864  @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details.
  1865  @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details.
  1866  @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera
  1867  for details.
  1868  @param tvecs Output vector of translation vectors estimated for each pattern view.
@param newObjPoints The updated output vector of calibration pattern points. The coordinates might
be scaled based on the three fixed points. The returned coordinates are accurate only if the
above-mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This
parameter is ignored with the standard calibration method.
  1873  @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
  1874  See #calibrateCamera for details.
  1875  @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
  1876  See #calibrateCamera for details.
  1877  @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates
  1878  of calibration pattern points. It has the same size and order as objectPoints[0] vector. This
  1879  parameter is ignored with standard calibration method.
@param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
@param flags Different flags that may be zero or a combination of some predefined values. See
#calibrateCamera for details. If the object-releasing method is used, the calibration time may
be much longer. CALIB_USE_QR or CALIB_USE_LU can be used for faster calibration, with potentially
less precise and less stable results in some rare cases.
  1885  @param criteria Termination criteria for the iterative optimization algorithm.
  1886  
  1887  @return the overall RMS re-projection error.
  1888  
  1889  The function estimates the intrinsic camera parameters and extrinsic parameters for each of the
  1890  views. The algorithm is based on @cite Zhang2000, @cite BouguetMCT and @cite strobl2011iccv. See
  1891  #calibrateCamera for other detailed explanations.
  1892  @sa
  1893     calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
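
A minimal usage sketch of the object-releasing path (the grid size, image size and input vectors
below are placeholders; real code would fill objectPoints and imagePoints from detected pattern
views first):
@code
    // Assume a 9x6 inner-corner chessboard; per-view points collected beforehand.
    vector<vector<Point3f> > objectPoints;
    vector<vector<Point2f> > imagePoints;
    Size imageSize(640, 480);
    int iFixedPoint = 9 - 1; // top-right corner of the first row selects the object-releasing method

    Mat cameraMatrix, distCoeffs, newObjPoints;
    vector<Mat> rvecs, tvecs;
    double rms = calibrateCameraRO(objectPoints, imagePoints, imageSize, iFixedPoint,
                                   cameraMatrix, distCoeffs, rvecs, tvecs, newObjPoints);
@endcode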
  1894   */
  1895  CV_EXPORTS_AS(calibrateCameraROExtended) double calibrateCameraRO( InputArrayOfArrays objectPoints,
  1896                                       InputArrayOfArrays imagePoints, Size imageSize, int iFixedPoint,
  1897                                       InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  1898                                       OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  1899                                       OutputArray newObjPoints,
  1900                                       OutputArray stdDeviationsIntrinsics,
  1901                                       OutputArray stdDeviationsExtrinsics,
  1902                                       OutputArray stdDeviationsObjPoints,
  1903                                       OutputArray perViewErrors,
  1904                                       int flags = 0, TermCriteria criteria = TermCriteria(
  1905                                          TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
  1906  
  1907  /** @overload */
  1908  CV_EXPORTS_W double calibrateCameraRO( InputArrayOfArrays objectPoints,
  1909                                       InputArrayOfArrays imagePoints, Size imageSize, int iFixedPoint,
  1910                                       InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  1911                                       OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  1912                                       OutputArray newObjPoints,
  1913                                       int flags = 0, TermCriteria criteria = TermCriteria(
  1914                                          TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );
  1915  
  1916  /** @brief Computes useful camera characteristics from the camera intrinsic matrix.
  1917  
  1918  @param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or
  1919  #stereoCalibrate .
  1920  @param imageSize Input image size in pixels.
  1921  @param apertureWidth Physical width in mm of the sensor.
  1922  @param apertureHeight Physical height in mm of the sensor.
  1923  @param fovx Output field of view in degrees along the horizontal sensor axis.
  1924  @param fovy Output field of view in degrees along the vertical sensor axis.
@param focalLength Output focal length of the lens in mm.
@param principalPoint Output principal point in mm.
  1927  @param aspectRatio \f$f_y/f_x\f$
  1928  
  1929  The function computes various useful camera characteristics from the previously estimated camera
  1930  matrix.
  1931  
  1932  @note
   Do keep in mind that the unit 'mm' here stands for whatever unit of measure one chooses for
    the chessboard pitch (it can thus be any unit).
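
A brief usage sketch (the intrinsic matrix and the 1/3" sensor dimensions below are placeholder
values; a real camera matrix would come from #calibrateCamera):
@code
    Mat cameraMatrix = (Mat_<double>(3, 3) << 800, 0, 320,
                                                0, 800, 240,
                                                0,   0,   1);
    double fovx, fovy, focalLength, aspectRatio;
    Point2d principalPoint;
    calibrationMatrixValues(cameraMatrix, Size(640, 480), 4.8, 3.6,
                            fovx, fovy, focalLength, principalPoint, aspectRatio);
@endcode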
  1935   */
  1936  CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize,
  1937                                             double apertureWidth, double apertureHeight,
  1938                                             CV_OUT double& fovx, CV_OUT double& fovy,
  1939                                             CV_OUT double& focalLength, CV_OUT Point2d& principalPoint,
  1940                                             CV_OUT double& aspectRatio );
  1941  
/** @brief Calibrates a stereo camera setup. This function finds the intrinsic parameters
for each of the two cameras and the extrinsic parameters between the two cameras.
  1944  
  1945  @param objectPoints Vector of vectors of the calibration pattern points. The same structure as
  1946  in @ref calibrateCamera. For each pattern view, both cameras need to see the same object
  1947  points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be
  1948  equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to
  1949  be equal for each i.
  1950  @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
  1951  observed by the first camera. The same structure as in @ref calibrateCamera.
  1952  @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
  1953  observed by the second camera. The same structure as in @ref calibrateCamera.
  1954  @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in
  1955  @ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below.
  1956  @param distCoeffs1 Input/output vector of distortion coefficients, the same as in
  1957  @ref calibrateCamera.
@param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
cameraMatrix1.
  1960  @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See
  1961  description for distCoeffs1.
  1962  @param imageSize Size of the image used only to initialize the camera intrinsic matrices.
  1963  @param R Output rotation matrix. Together with the translation vector T, this matrix brings
  1964  points given in the first camera's coordinate system to points in the second camera's
  1965  coordinate system. In more technical terms, the tuple of R and T performs a change of basis
  1966  from the first camera's coordinate system to the second camera's coordinate system. Due to its
  1967  duality, this tuple is equivalent to the position of the first camera with respect to the
  1968  second camera coordinate system.
  1969  @param T Output translation vector, see description above.
  1970  @param E Output essential matrix.
  1971  @param F Output fundamental matrix.
  1972  @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view.
  1973  @param flags Different flags that may be zero or a combination of the following values:
  1974  -   @ref CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F
  1975  matrices are estimated.
  1976  -   @ref CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters
  1977  according to the specified flags. Initial values are provided by the user.
  1978  -   @ref CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further.
  1979  Otherwise R and T are initialized to the median value of the pattern views (each dimension separately).
  1980  -   @ref CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization.
  1981  -   @ref CALIB_FIX_FOCAL_LENGTH Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
-   @ref CALIB_FIX_ASPECT_RATIO Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$ .
  1984  -   @ref CALIB_SAME_FOCAL_LENGTH Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
-   @ref CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to
zeros and keep them fixed.
  1987  -   @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 Do not change the corresponding radial
  1988  distortion coefficient during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set,
  1989  the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
  1990  -   @ref CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward
  1991  compatibility, this extra flag should be explicitly specified to make the calibration
  1992  function use the rational model and return 8 coefficients. If the flag is not set, the
  1993  function computes and returns only 5 distortion coefficients.
  1994  -   @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the
  1995  backward compatibility, this extra flag should be explicitly specified to make the
  1996  calibration function use the thin prism model and return 12 coefficients. If the flag is not
  1997  set, the function computes and returns only 5 distortion coefficients.
  1998  -   @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during
  1999  the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
  2000  supplied distCoeffs matrix is used. Otherwise, it is set to 0.
  2001  -   @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the
  2002  backward compatibility, this extra flag should be explicitly specified to make the
  2003  calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
  2004  set, the function computes and returns only 5 distortion coefficients.
  2005  -   @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during
  2006  the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
  2007  supplied distCoeffs matrix is used. Otherwise, it is set to 0.
  2008  @param criteria Termination criteria for the iterative optimization algorithm.
  2009  
  2010  The function estimates the transformation between two cameras making a stereo pair. If one computes
  2011  the poses of an object relative to the first camera and to the second camera,
  2012  ( \f$R_1\f$,\f$T_1\f$ ) and (\f$R_2\f$,\f$T_2\f$), respectively, for a stereo camera where the
  2013  relative position and orientation between the two cameras are fixed, then those poses definitely
  2014  relate to each other. This means, if the relative position and orientation (\f$R\f$,\f$T\f$) of the
  2015  two cameras is known, it is possible to compute (\f$R_2\f$,\f$T_2\f$) when (\f$R_1\f$,\f$T_1\f$) is
  2016  given. This is what the described function does. It computes (\f$R\f$,\f$T\f$) such that:
  2017  
  2018  \f[R_2=R R_1\f]
  2019  \f[T_2=R T_1 + T.\f]
  2020  
  2021  Therefore, one can compute the coordinate representation of a 3D point for the second camera's
  2022  coordinate system when given the point's coordinate representation in the first camera's coordinate
  2023  system:
  2024  
  2025  \f[\begin{bmatrix}
  2026  X_2 \\
  2027  Y_2 \\
  2028  Z_2 \\
  2029  1
  2030  \end{bmatrix} = \begin{bmatrix}
  2031  R & T \\
  2032  0 & 1
  2033  \end{bmatrix} \begin{bmatrix}
  2034  X_1 \\
  2035  Y_1 \\
  2036  Z_1 \\
  2037  1
  2038  \end{bmatrix}.\f]
  2039  
  2040  
  2041  Optionally, it computes the essential matrix E:
  2042  
  2043  \f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\f]
  2044  
  2045  where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ .
  2046  And the function can also compute the fundamental matrix F:
  2047  
  2048  \f[F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\f]
  2049  
  2050  Besides the stereo-related information, the function can also perform a full calibration of each of
  2051  the two cameras. However, due to the high dimensionality of the parameter space and noise in the
  2052  input data, the function can diverge from the correct solution. If the intrinsic parameters can be
  2053  estimated with high accuracy for each of the cameras individually (for example, using
  2054  #calibrateCamera ), you are recommended to do so and then pass @ref CALIB_FIX_INTRINSIC flag to the
  2055  function along with the computed intrinsic parameters. Otherwise, if all the parameters are
  2056  estimated at once, it makes sense to restrict some parameters, for example, pass
  2057   @ref CALIB_SAME_FOCAL_LENGTH and @ref CALIB_ZERO_TANGENT_DIST flags, which is usually a
  2058  reasonable assumption.
  2059  
  2060  Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the
  2061  points in all the available views from both cameras. The function returns the final value of the
  2062  re-projection error.
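
A minimal sketch of the typical call, using intrinsics precomputed with #calibrateCamera and kept
fixed (the point lists and image size are placeholders to be filled beforehand):
@code
    vector<vector<Point3f> > objectPoints;                // pattern points per view
    vector<vector<Point2f> > imagePoints1, imagePoints2;  // detected corners per view
    Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; // from calibrateCamera
    Mat R, T, E, F;
    double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                                 cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                                 Size(640, 480), R, T, E, F, CALIB_FIX_INTRINSIC);
@endcode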
  2063   */
  2064  CV_EXPORTS_AS(stereoCalibrateExtended) double stereoCalibrate( InputArrayOfArrays objectPoints,
  2065                                       InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
  2066                                       InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
  2067                                       InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
  2068                                       Size imageSize, InputOutputArray R,InputOutputArray T, OutputArray E, OutputArray F,
  2069                                       OutputArray perViewErrors, int flags = CALIB_FIX_INTRINSIC,
  2070                                       TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
  2071  
  2072  /// @overload
  2073  CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints,
  2074                                       InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
  2075                                       InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1,
  2076                                       InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2,
  2077                                       Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F,
  2078                                       int flags = CALIB_FIX_INTRINSIC,
  2079                                       TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) );
  2080  
  2081  /** @brief Computes rectification transforms for each head of a calibrated stereo camera.
  2082  
  2083  @param cameraMatrix1 First camera intrinsic matrix.
  2084  @param distCoeffs1 First camera distortion parameters.
  2085  @param cameraMatrix2 Second camera intrinsic matrix.
  2086  @param distCoeffs2 Second camera distortion parameters.
  2087  @param imageSize Size of the image used for stereo calibration.
  2088  @param R Rotation matrix from the coordinate system of the first camera to the second camera,
  2089  see @ref stereoCalibrate.
  2090  @param T Translation vector from the coordinate system of the first camera to the second camera,
  2091  see @ref stereoCalibrate.
  2092  @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix
  2093  brings points given in the unrectified first camera's coordinate system to points in the rectified
  2094  first camera's coordinate system. In more technical terms, it performs a change of basis from the
  2095  unrectified first camera's coordinate system to the rectified first camera's coordinate system.
  2096  @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix
  2097  brings points given in the unrectified second camera's coordinate system to points in the rectified
  2098  second camera's coordinate system. In more technical terms, it performs a change of basis from the
  2099  unrectified second camera's coordinate system to the rectified second camera's coordinate system.
  2100  @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
  2101  camera, i.e. it projects points given in the rectified first camera coordinate system into the
  2102  rectified first camera's image.
  2103  @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
  2104  camera, i.e. it projects points given in the rectified first camera coordinate system into the
  2105  rectified second camera's image.
  2106  @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see @ref reprojectImageTo3D).
  2107  @param flags Operation flags that may be zero or @ref CALIB_ZERO_DISPARITY . If the flag is set,
  2108  the function makes the principal points of each camera have the same pixel coordinates in the
  2109  rectified views. And if the flag is not set, the function may still shift the images in the
  2110  horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
  2111  useful image area.
  2112  @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default
  2113  scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
  2114  images are zoomed and shifted so that only valid pixels are visible (no black areas after
  2115  rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
  2116  pixels from the original images from the cameras are retained in the rectified images (no source
  2117  image pixels are lost). Any intermediate value yields an intermediate result between
  2118  those two extreme cases.
  2119  @param newImageSize New image resolution after rectification. The same size should be passed to
#initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
  2121  is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
  2122  preserve details in the original image, especially when there is a big radial distortion.
  2123  @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels
  2124  are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
  2125  (see the picture below).
  2126  @param validPixROI2 Optional output rectangles inside the rectified images where all the pixels
  2127  are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller
  2128  (see the picture below).
  2129  
  2130  The function computes the rotation matrices for each camera that (virtually) make both camera image
  2131  planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies
  2132  the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate
  2133  as input. As output, it provides two rotation matrices and also two projection matrices in the new
  2134  coordinates. The function distinguishes the following two cases:
  2135  
  2136  -   **Horizontal stereo**: the first and the second camera views are shifted relative to each other
  2137      mainly along the x-axis (with possible small vertical shift). In the rectified images, the
  2138      corresponding epipolar lines in the left and right cameras are horizontal and have the same
  2139      y-coordinate. P1 and P2 look like:
  2140  
  2141      \f[\texttt{P1} = \begin{bmatrix}
  2142                          f & 0 & cx_1 & 0 \\
  2143                          0 & f & cy & 0 \\
  2144                          0 & 0 & 1 & 0
  2145                       \end{bmatrix}\f]
  2146  
  2147      \f[\texttt{P2} = \begin{bmatrix}
  2148                          f & 0 & cx_2 & T_x*f \\
  2149                          0 & f & cy & 0 \\
  2150                          0 & 0 & 1 & 0
  2151                       \end{bmatrix} ,\f]
  2152  
  2153      where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if
  2154      @ref CALIB_ZERO_DISPARITY is set.
  2155  
  2156  -   **Vertical stereo**: the first and the second camera views are shifted relative to each other
  2157      mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar
  2158      lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
  2159  
  2160      \f[\texttt{P1} = \begin{bmatrix}
  2161                          f & 0 & cx & 0 \\
  2162                          0 & f & cy_1 & 0 \\
  2163                          0 & 0 & 1 & 0
  2164                       \end{bmatrix}\f]
  2165  
  2166      \f[\texttt{P2} = \begin{bmatrix}
  2167                          f & 0 & cx & 0 \\
  2168                          0 & f & cy_2 & T_y*f \\
  2169                          0 & 0 & 1 & 0
  2170                       \end{bmatrix},\f]
  2171  
  2172      where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if
  2173      @ref CALIB_ZERO_DISPARITY is set.
  2174  
  2175  As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera
  2176  matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to
  2177  initialize the rectification map for each camera.
  2178  
  2179  See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
  2180  the corresponding image regions. This means that the images are well rectified, which is what most
  2181  stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that
  2182  their interiors are all valid pixels.
  2183  
  2184  ![image](pics/stereo_undistort.jpg)
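
A brief sketch of the rectification pipeline (continuing from the #stereoCalibrate example above;
all input matrices are assumed to be already computed):
@code
    Mat R1, R2, P1, P2, Q;
    Rect roi1, roi2;
    stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                  Size(640, 480), R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, -1, Size(), &roi1, &roi2);
    // Build the undistort+rectify maps for the first camera (likewise for the second):
    Mat map1x, map1y;
    initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1,
                            Size(640, 480), CV_16SC2, map1x, map1y);
@endcode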
  2185   */
  2186  CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1,
  2187                                   InputArray cameraMatrix2, InputArray distCoeffs2,
  2188                                   Size imageSize, InputArray R, InputArray T,
  2189                                   OutputArray R1, OutputArray R2,
  2190                                   OutputArray P1, OutputArray P2,
  2191                                   OutputArray Q, int flags = CALIB_ZERO_DISPARITY,
  2192                                   double alpha = -1, Size newImageSize = Size(),
  2193                                   CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 );
  2194  
  2195  /** @brief Computes a rectification transform for an uncalibrated stereo camera.
  2196  
  2197  @param points1 Array of feature points in the first image.
  2198  @param points2 The corresponding points in the second image. The same formats as in
  2199  #findFundamentalMat are supported.
  2200  @param F Input fundamental matrix. It can be computed from the same set of point pairs using
  2201  #findFundamentalMat .
  2202  @param imgSize Size of the image.
  2203  @param H1 Output rectification homography matrix for the first image.
  2204  @param H2 Output rectification homography matrix for the second image.
  2205  @param threshold Optional threshold used to filter out the outliers. If the parameter is greater
  2206  than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
  2207  for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are
  2208  rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
  2209  
  2210  The function computes the rectification transformations without knowing intrinsic parameters of the
  2211  cameras and their relative position in the space, which explains the suffix "uncalibrated". Another
  2212  related difference from #stereoRectify is that the function outputs not the rectification
  2213  transformations in the object (3D) space, but the planar perspective transformations encoded by the
  2214  homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 .
  2215  
  2216  @note
  2217     While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily
  2218      depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion,
  2219      it would be better to correct it before computing the fundamental matrix and calling this
  2220      function. For example, distortion coefficients can be estimated for each head of stereo camera
  2221      separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or
  2222      just the point coordinates can be corrected with #undistortPoints .
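
A minimal sketch (points1/points2 are assumed to hold matched feature coordinates, e.g. from a
descriptor matcher):
@code
    vector<Point2f> points1, points2; // filled with matched points beforehand
    Mat F = findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
    Mat H1, H2;
    stereoRectifyUncalibrated(points1, points2, F, Size(640, 480), H1, H2);
@endcode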
  2223   */
  2224  CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2,
  2225                                               InputArray F, Size imgSize,
  2226                                               OutputArray H1, OutputArray H2,
  2227                                               double threshold = 5 );
  2228  
//! computes the rectification transformations for a 3-head camera, where all the heads are on the same line.
  2230  CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1,
  2231                                        InputArray cameraMatrix2, InputArray distCoeffs2,
  2232                                        InputArray cameraMatrix3, InputArray distCoeffs3,
  2233                                        InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3,
  2234                                        Size imageSize, InputArray R12, InputArray T12,
  2235                                        InputArray R13, InputArray T13,
  2236                                        OutputArray R1, OutputArray R2, OutputArray R3,
  2237                                        OutputArray P1, OutputArray P2, OutputArray P3,
  2238                                        OutputArray Q, double alpha, Size newImgSize,
  2239                                        CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags );
  2240  
  2241  /** @brief Returns the new camera intrinsic matrix based on the free scaling parameter.
  2242  
  2243  @param cameraMatrix Input camera intrinsic matrix.
  2244  @param distCoeffs Input vector of distortion coefficients
  2245  \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are
  2246  assumed.
  2247  @param imageSize Original image size.
  2248  @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are
  2249  valid) and 1 (when all the source image pixels are retained in the undistorted image). See
  2250  #stereoRectify for details.
  2251  @param newImgSize Image size after rectification. By default, it is set to imageSize .
  2252  @param validPixROI Optional output rectangle that outlines all-good-pixels region in the
  2253  undistorted image. See roi1, roi2 description in #stereoRectify .
  2254  @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the
  2255  principal point should be at the image center or not. By default, the principal point is chosen to
  2256  best fit a subset of the source image (determined by alpha) to the corrected image.
@return New camera intrinsic matrix.
  2258  
The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter.
By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
image pixels if there is valuable information in the corners (alpha=1), or get something in between.
  2262  When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to
  2263  "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion
  2264  coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to
  2265  #initUndistortRectifyMap to produce the maps for #remap .
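
A brief sketch of the full undistortion pipeline (cameraMatrix, distCoeffs, imgSize and the
distorted input image are assumed to exist; #remap additionally requires opencv2/imgproc.hpp):
@code
    Rect validRoi;
    Mat newK = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imgSize, /*alpha=*/1.0,
                                         imgSize, &validRoi);
    Mat map1, map2, undistorted;
    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), newK, imgSize, CV_16SC2, map1, map2);
    remap(distorted, undistorted, map1, map2, INTER_LINEAR);
@endcode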
  2266   */
  2267  CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs,
  2268                                              Size imageSize, double alpha, Size newImgSize = Size(),
  2269                                              CV_OUT Rect* validPixROI = 0,
  2270                                              bool centerPrincipalPoint = false);
  2271  
  2272  /** @brief Computes Hand-Eye calibration: \f$_{}^{g}\textrm{T}_c\f$
  2273  
  2274  @param[in] R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point
  2275  expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$).
  2276  This is a vector (`vector<Mat>`) that contains the rotation, `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  2277  for all the transformations from gripper frame to robot base frame.
  2278  @param[in] t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point
  2279  expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$).
  2280  This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  2281  from gripper frame to robot base frame.
  2282  @param[in] R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point
  2283  expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$).
  2284  This is a vector (`vector<Mat>`) that contains the rotation, `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  2285  for all the transformations from calibration target frame to camera frame.
@param[in] t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$).
  2288  This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  2289  from calibration target frame to camera frame.
  2290  @param[out] R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  2291  expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$).
  2292  @param[out] t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  2293  expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$).
@param[in] method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
  2295  
The function performs the Hand-Eye calibration using various methods. One approach consists in
estimating the rotation and then the translation (separable solutions); the following methods are implemented:
  2298    - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/EyeCalibration \cite Tsai89
  2299    - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94
  2300    - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95
  2301  
  2302  Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
  2303  with the following implemented methods:
  2304    - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99
  2305    - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98
  2306  
The following picture describes the Hand-Eye calibration problem, where the transformation between a
camera ("eye") mounted on a robot gripper ("hand") and the gripper itself has to be estimated.
This configuration is called eye-in-hand.
  2309  
  2310  The eye-to-hand configuration consists in a static camera observing a calibration pattern mounted on the robot
  2311  end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting
  2312  the suitable transformations to the function, see below.
  2313  
  2314  ![](pics/hand-eye_figure.png)
  2315  
  2316  The calibration procedure is the following:
  2317    - a static calibration pattern is used to estimate the transformation between the target frame
  2318    and the camera frame
  2319    - the robot gripper is moved in order to acquire several poses
  2320    - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
  2321    instance the robot kinematics
  2322  \f[
  2323      \begin{bmatrix}
  2324      X_b\\
  2325      Y_b\\
  2326      Z_b\\
  2327      1
  2328      \end{bmatrix}
  2329      =
  2330      \begin{bmatrix}
  2331      _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\
  2332      0_{1 \times 3} & 1
  2333      \end{bmatrix}
  2334      \begin{bmatrix}
  2335      X_g\\
  2336      Y_g\\
  2337      Z_g\\
  2338      1
  2339      \end{bmatrix}
  2340  \f]
  2341    - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using
  2342    for instance a pose estimation method (PnP) from 2D-3D point correspondences
  2343  \f[
  2344      \begin{bmatrix}
  2345      X_c\\
  2346      Y_c\\
  2347      Z_c\\
  2348      1
  2349      \end{bmatrix}
  2350      =
  2351      \begin{bmatrix}
  2352      _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\
  2353      0_{1 \times 3} & 1
  2354      \end{bmatrix}
  2355      \begin{bmatrix}
  2356      X_t\\
  2357      Y_t\\
  2358      Z_t\\
  2359      1
  2360      \end{bmatrix}
  2361  \f]
  2362  
  2363  The Hand-Eye calibration procedure returns the following homogeneous transformation
  2364  \f[
  2365      \begin{bmatrix}
  2366      X_g\\
  2367      Y_g\\
  2368      Z_g\\
  2369      1
  2370      \end{bmatrix}
  2371      =
  2372      \begin{bmatrix}
  2373      _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\
  2374      0_{1 \times 3} & 1
  2375      \end{bmatrix}
  2376      \begin{bmatrix}
  2377      X_c\\
  2378      Y_c\\
  2379      Z_c\\
  2380      1
  2381      \end{bmatrix}
  2382  \f]
  2383  
  2384  This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\f$ equation:
  2385    - for an eye-in-hand configuration
  2386  \f[
  2387      \begin{align*}
  2388      ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
  2389      \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
  2390  
  2391      (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &=
  2392      \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
  2393  
  2394      \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
  2395      \end{align*}
  2396  \f]
  2397  
  2398    - for an eye-to-hand configuration
  2399  \f[
  2400      \begin{align*}
  2401      ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &=
  2402      \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\
  2403  
  2404      (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &=
  2405      \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\
  2406  
  2407      \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\
  2408      \end{align*}
  2409  \f]
  2410  
  2411  \note
  2412  Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration).
  2413  \note
A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye
transformation. So at least 3 different poses are required, but it is strongly recommended to use
many more poses.
  2416  
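A minimal sketch for the eye-in-hand case (the four input vectors are assumed to have been recorded
pose by pose, in the same order):
@code
    // R_gripper2base/t_gripper2base: from the robot forward kinematics, one entry per pose.
    // R_target2cam/t_target2cam: from a pose estimation method such as solvePnP, same order.
    vector<Mat> R_gripper2base, t_gripper2base, R_target2cam, t_target2cam;
    Mat R_cam2gripper, t_cam2gripper;
    calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
                     R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);
@endcode
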
  2417   */
  2418  CV_EXPORTS_W void calibrateHandEye( InputArrayOfArrays R_gripper2base, InputArrayOfArrays t_gripper2base,
  2419                                      InputArrayOfArrays R_target2cam, InputArrayOfArrays t_target2cam,
  2420                                      OutputArray R_cam2gripper, OutputArray t_cam2gripper,
  2421                                      HandEyeCalibrationMethod method=CALIB_HAND_EYE_TSAI );
  2422  
  2423  /** @brief Computes Robot-World/Hand-Eye calibration: \f$_{}^{w}\textrm{T}_b\f$ and \f$_{}^{c}\textrm{T}_g\f$
  2424  
  2425  @param[in] R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point
  2426  expressed in the world frame to the camera frame (\f$_{}^{c}\textrm{T}_w\f$).
  2427  This is a vector (`vector<Mat>`) that contains the rotation, `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  2428  for all the transformations from world frame to the camera frame.
  2429  @param[in] t_world2cam Translation part extracted from the homogeneous matrix that transforms a point
  2430  expressed in the world frame to the camera frame (\f$_{}^{c}\textrm{T}_w\f$).
  2431  This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  2432  from world frame to the camera frame.
  2433  @param[in] R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point
  2434  expressed in the robot base frame to the gripper frame (\f$_{}^{g}\textrm{T}_b\f$).
  2435  This is a vector (`vector<Mat>`) that contains the rotation, `(3x3)` rotation matrices or `(3x1)` rotation vectors,
  2436  for all the transformations from robot base frame to the gripper frame.
@param[in] t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
expressed in the robot base frame to the gripper frame (\f$_{}^{g}\textrm{T}_b\f$).
  2439  This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations
  2440  from robot base frame to the gripper frame.
  2441  @param[out] R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  2442  expressed in the robot base frame to the world frame (\f$_{}^{w}\textrm{T}_b\f$).
  2443  @param[out] t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  2444  expressed in the robot base frame to the world frame (\f$_{}^{w}\textrm{T}_b\f$).
  2445  @param[out] R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point
  2446  expressed in the gripper frame to the camera frame (\f$_{}^{c}\textrm{T}_g\f$).
  2447  @param[out] t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point
  2448  expressed in the gripper frame to the camera frame (\f$_{}^{c}\textrm{T}_g\f$).
@param[in] method One of the implemented Robot-World/Hand-Eye calibration methods, see cv::RobotWorldHandEyeCalibrationMethod
  2450  
  2451  The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the
  2452  rotation then the translation (separable solutions):
  2453    - M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR
  2454  
  2455  Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions),
  2456  with the following implemented method:
  2457    - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA
  2458  
  2459  The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame
  2460  and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated.
  2461  
  2462  ![](pics/robot-world_hand-eye_figure.png)
  2463  
  2464  The calibration procedure is the following:
  2465    - a static calibration pattern is used to estimate the transformation between the target frame
  2466    and the camera frame
  2467    - the robot gripper is moved in order to acquire several poses
  2468    - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for
  2469    instance the robot kinematics
  2470  \f[
  2471      \begin{bmatrix}
  2472      X_g\\
  2473      Y_g\\
  2474      Z_g\\
  2475      1
  2476      \end{bmatrix}
  2477      =
  2478      \begin{bmatrix}
  2479      _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\
  2480      0_{1 \times 3} & 1
  2481      \end{bmatrix}
  2482      \begin{bmatrix}
  2483      X_b\\
  2484      Y_b\\
  2485      Z_b\\
  2486      1
  2487      \end{bmatrix}
  2488  \f]
  2489    - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using
  2490    for instance a pose estimation method (PnP) from 2D-3D point correspondences
  2491  \f[
  2492      \begin{bmatrix}
  2493      X_c\\
  2494      Y_c\\
  2495      Z_c\\
  2496      1
  2497      \end{bmatrix}
  2498      =
  2499      \begin{bmatrix}
  2500      _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\
  2501      0_{1 \times 3} & 1
  2502      \end{bmatrix}
  2503      \begin{bmatrix}
  2504      X_w\\
  2505      Y_w\\
  2506      Z_w\\
  2507      1
  2508      \end{bmatrix}
  2509  \f]
  2510  
  2511  The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations
  2512  \f[
  2513      \begin{bmatrix}
  2514      X_w\\
  2515      Y_w\\
  2516      Z_w\\
  2517      1
  2518      \end{bmatrix}
  2519      =
  2520      \begin{bmatrix}
  2521      _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\
  2522      0_{1 \times 3} & 1
  2523      \end{bmatrix}
  2524      \begin{bmatrix}
  2525      X_b\\
  2526      Y_b\\
  2527      Z_b\\
  2528      1
  2529      \end{bmatrix}
  2530  \f]
  2531  \f[
  2532      \begin{bmatrix}
  2533      X_c\\
  2534      Y_c\\
  2535      Z_c\\
  2536      1
  2537      \end{bmatrix}
  2538      =
  2539      \begin{bmatrix}
  2540      _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\
  2541      0_{1 \times 3} & 1
  2542      \end{bmatrix}
  2543      \begin{bmatrix}
  2544      X_g\\
  2545      Y_g\\
  2546      Z_g\\
  2547      1
  2548      \end{bmatrix}
  2549  \f]
  2550  
  2551  This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\f$ equation, with:
  2552    - \f$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\f$
  2553    - \f$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\f$
  2554    - \f$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\f$
  2555    - \f$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\f$
  2556  
  2557  \note
At least 3 measurements are required (the input vector sizes must be greater than or equal to 3).
  2559  
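A minimal sketch (the input vectors are assumed to have been recorded pose by pose, in the same
order; base-to-gripper transforms are the inverses of the gripper poses given by the kinematics):
@code
    vector<Mat> R_world2cam, t_world2cam;       // e.g. from solvePnP on the pattern, per pose
    vector<Mat> R_base2gripper, t_base2gripper; // inverted gripper poses, per pose
    Mat R_base2world, t_base2world, R_gripper2cam, t_gripper2cam;
    calibrateRobotWorldHandEye(R_world2cam, t_world2cam, R_base2gripper, t_base2gripper,
                               R_base2world, t_base2world, R_gripper2cam, t_gripper2cam,
                               CALIB_ROBOT_WORLD_HAND_EYE_SHAH);
@endcode
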
  2560   */
  2561  CV_EXPORTS_W void calibrateRobotWorldHandEye( InputArrayOfArrays R_world2cam, InputArrayOfArrays t_world2cam,
  2562                                                InputArrayOfArrays R_base2gripper, InputArrayOfArrays t_base2gripper,
  2563                                                OutputArray R_base2world, OutputArray t_base2world,
  2564                                                OutputArray R_gripper2cam, OutputArray t_gripper2cam,
  2565                                                RobotWorldHandEyeCalibrationMethod method=CALIB_ROBOT_WORLD_HAND_EYE_SHAH );
  2566  
  2567  /** @brief Converts points from Euclidean to homogeneous space.
  2568  
  2569  @param src Input vector of N-dimensional points.
  2570  @param dst Output vector of N+1-dimensional points.
  2571  
  2572  The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of
  2573  point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
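
For example (a 2D point gains a unit third coordinate):
@code
    vector<Point2f> pts2d = { Point2f(2.f, 3.f) };
    vector<Point3f> pts3d;
    convertPointsToHomogeneous(pts2d, pts3d); // pts3d[0] == (2, 3, 1)
@endcode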
  2574   */
  2575  CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst );
  2576  
  2577  /** @brief Converts points from homogeneous to Euclidean space.
  2578  
  2579  @param src Input vector of N-dimensional points.
  2580  @param dst Output vector of N-1-dimensional points.
  2581  
The function converts points from homogeneous to Euclidean space using perspective projection. That is,
  2583  each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the
  2584  output point coordinates will be (0,0,0,...).
  2585   */
  2586  CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst );
  2587  
  2588  /** @brief Converts points to/from homogeneous coordinates.
  2589  
  2590  @param src Input array or vector of 2D, 3D, or 4D points.
  2591  @param dst Output vector of 2D, 3D, or 4D points.
  2592  
  2593  The function converts 2D or 3D points from/to homogeneous coordinates by calling either
  2594  #convertPointsToHomogeneous or #convertPointsFromHomogeneous.
  2595  
  2596  @note The function is obsolete. Use one of the previous two functions instead.
  2597   */
  2598  CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst );
  2599  
  2600  /** @brief Calculates a fundamental matrix from the corresponding points in two images.
  2601  
  2602  @param points1 Array of N points from the first image. The point coordinates should be
  2603  floating-point (single or double precision).
  2604  @param points2 Array of the second image points of the same size and format as points1 .
  2605  @param method Method for computing a fundamental matrix.
  2606  -   @ref FM_7POINT for a 7-point algorithm. \f$N = 7\f$
  2607  -   @ref FM_8POINT for an 8-point algorithm. \f$N \ge 8\f$
  2608  -   @ref FM_RANSAC for the RANSAC algorithm. \f$N \ge 8\f$
  2609  -   @ref FM_LMEDS for the LMedS algorithm. \f$N \ge 8\f$
  2610  @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
  2611  line in pixels, beyond which the point is considered an outlier and is not used for computing the
  2612  final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
  2613  point localization, image resolution, and the image noise.
  2614  @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
  2615  of confidence (probability) that the estimated matrix is correct.
@param[out] mask Optional output mask; every element is set to 0 for outliers and to 1 for the
other points.
  2617  @param maxIters The maximum number of robust method iterations.
  2618  
  2619  The epipolar geometry is described by the following equation:
  2620  
  2621  \f[[p_2; 1]^T F [p_1; 1] = 0\f]
  2622  
  2623  where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
  2624  second images, respectively.
  2625  
  2626  The function calculates the fundamental matrix using one of four methods listed above and returns
  2627  the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point
  2628  algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3
  2629  matrices sequentially).
  2630  
  2631  The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the
  2632  epipolar lines corresponding to the specified points. It can also be passed to
#stereoRectifyUncalibrated to compute the rectification transformation:
@code
    // Example. Estimation of fundamental matrix using the RANSAC algorithm
    int point_count = 100;
    vector<Point2f> points1(point_count);
    vector<Point2f> points2(point_count);

    // initialize the points here (dummy values for illustration; real code would
    // use matched feature coordinates instead)
    for( int i = 0; i < point_count; i++ )
    {
        points1[i] = Point2f((float)(i % 10) * 40.f, (float)(i / 10) * 40.f);
        points2[i] = points1[i] + Point2f(5.f, 2.f * (float)(i % 10));
    }

    Mat fundamental_matrix =
        findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
@endcode
  2650   */
  2651  CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
  2652                                       int method, double ransacReprojThreshold, double confidence,
  2653                                       int maxIters, OutputArray mask = noArray() );
  2654  
  2655  /** @overload */
  2656  CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
  2657                                       int method = FM_RANSAC,
  2658                                       double ransacReprojThreshold = 3., double confidence = 0.99,
  2659                                       OutputArray mask = noArray() );
  2660  
  2661  /** @overload */
  2662  CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2,
  2663                                     OutputArray mask, int method = FM_RANSAC,
  2664                                     double ransacReprojThreshold = 3., double confidence = 0.99 );
  2665  
  2666  
  2667  CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2,
  2668                          OutputArray mask, const UsacParams &params);
  2669  
  2670  /** @brief Calculates an essential matrix from the corresponding points in two images.
  2671  
  2672  @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
  2673  be floating-point (single or double precision).
  2674  @param points2 Array of the second image points of the same size and format as points1 .
  2675  @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  2676  Note that this function assumes that points1 and points2 are feature points from cameras with the
  2677  same camera intrinsic matrix. If this assumption does not hold for your use case, use
#undistortPoints with `P = cv::noArray()` for both cameras to transform image points
  2679  to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When
  2680  passing these coordinates, pass the identity matrix for this parameter.
  2681  @param method Method for computing an essential matrix.
  2682  -   @ref RANSAC for the RANSAC algorithm.
  2683  -   @ref LMEDS for the LMedS algorithm.
  2684  @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
  2685  confidence (probability) that the estimated matrix is correct.
  2686  @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
  2687  line in pixels, beyond which the point is considered an outlier and is not used for computing the
  2688  final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
  2689  point localization, image resolution, and the image noise.
  2690  @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
  2691  for the other points. The array is computed only in the RANSAC and LMedS methods.
  2692  @param maxIters The maximum number of robust method iterations.
  2693  
This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
@cite SteweniusCFS is also related. The epipolar geometry is described by the following equation:
  2696  
  2697  \f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
  2698  
  2699  where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
  2700  second images, respectively. The result of this function may be passed further to
  2701  #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
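
A minimal sketch (points1/points2 are assumed to hold matched image points; the intrinsic matrix is
a placeholder):
@code
    vector<Point2f> points1, points2; // filled with matched points beforehand
    Mat K = (Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
    Mat E, R, t, mask;
    E = findEssentialMat(points1, points2, K, RANSAC, 0.999, 1.0, 1000, mask);
    recoverPose(E, points1, points2, K, R, t, mask);
@endcode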
  2702   */
  2703  CV_EXPORTS_W
  2704  Mat findEssentialMat(
  2705      InputArray points1, InputArray points2,
  2706      InputArray cameraMatrix, int method = RANSAC,
  2707      double prob = 0.999, double threshold = 1.0,
  2708      int maxIters = 1000, OutputArray mask = noArray()
  2709  );
  2710  
  2711  /** @overload */
  2712  CV_EXPORTS
  2713  Mat findEssentialMat(
  2714      InputArray points1, InputArray points2,
  2715      InputArray cameraMatrix, int method,
  2716      double prob, double threshold,
  2717      OutputArray mask
  2718  );  // TODO remove from OpenCV 5.0
  2719  
  2720  /** @overload
  2721  @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
  2722  be floating-point (single or double precision).
  2723  @param points2 Array of the second image points of the same size and format as points1 .
@param focal Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with the same focal length and principal point.
@param pp Principal point of the camera.
  2727  @param method Method for computing a fundamental matrix.
  2728  -   @ref RANSAC for the RANSAC algorithm.
  2729  -   @ref LMEDS for the LMedS algorithm.
  2730  @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
  2731  line in pixels, beyond which the point is considered an outlier and is not used for computing the
  2732  final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
  2733  point localization, image resolution, and the image noise.
  2734  @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
  2735  confidence (probability) that the estimated matrix is correct.
  2736  @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
  2737  for the other points. The array is computed only in the RANSAC and LMedS methods.
  2738  @param maxIters The maximum number of robust method iterations.
  2739  
This function differs from the one above in that it computes the camera intrinsic matrix from the
focal length and principal point:
  2742  
  2743  \f[A =
  2744  \begin{bmatrix}
  2745  f & 0 & x_{pp}  \\
  2746  0 & f & y_{pp}  \\
  2747  0 & 0 & 1
  2748  \end{bmatrix}\f]
  2749   */
  2750  CV_EXPORTS_W
  2751  Mat findEssentialMat(
  2752      InputArray points1, InputArray points2,
  2753      double focal = 1.0, Point2d pp = Point2d(0, 0),
  2754      int method = RANSAC, double prob = 0.999,
  2755      double threshold = 1.0, int maxIters = 1000,
  2756      OutputArray mask = noArray()
  2757  );
  2758  
  2759  /** @overload */
  2760  CV_EXPORTS
  2761  Mat findEssentialMat(
  2762      InputArray points1, InputArray points2,
  2763      double focal, Point2d pp,
  2764      int method, double prob,
  2765      double threshold, OutputArray mask
  2766  );  // TODO remove from OpenCV 5.0
  2767  
  2768  /** @brief Calculates an essential matrix from the corresponding points in two images from potentially two different cameras.
  2769  
  2770  @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should
  2771  be floating-point (single or double precision).
  2772  @param points2 Array of the second image points of the same size and format as points1 .
  2773  @param cameraMatrix1 Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
  2774  Note that this function assumes that points1 and points2 are feature points from cameras with the
  2775  same camera matrix. If this assumption does not hold for your use case, use
#undistortPoints with `P = cv::noArray()` for both cameras to transform image points
  2777  to normalized image coordinates, which are valid for the identity camera matrix. When
  2778  passing these coordinates, pass the identity matrix for this parameter.
  2779  @param cameraMatrix2 Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
  2780  Note that this function assumes that points1 and points2 are feature points from cameras with the
  2781  same camera matrix. If this assumption does not hold for your use case, use
#undistortPoints with `P = cv::noArray()` for both cameras to transform image points
  2783  to normalized image coordinates, which are valid for the identity camera matrix. When
  2784  passing these coordinates, pass the identity matrix for this parameter.
  2785  @param distCoeffs1 Input vector of distortion coefficients
  2786  \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  2787  of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
  2788  @param distCoeffs2 Input vector of distortion coefficients
  2789  \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  2790  of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
  2791  @param method Method for computing an essential matrix.
  2792  -   @ref RANSAC for the RANSAC algorithm.
  2793  -   @ref LMEDS for the LMedS algorithm.
  2794  @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
  2795  confidence (probability) that the estimated matrix is correct.
  2796  @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
  2797  line in pixels, beyond which the point is considered an outlier and is not used for computing the
  2798  final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
  2799  point localization, image resolution, and the image noise.
  2800  @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1
  2801  for the other points. The array is computed only in the RANSAC and LMedS methods.
  2802  
  2803  This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
  2804  @cite SteweniusCFS is also related. The epipolar geometry is described by the following equation:
  2805  
  2806  \f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
  2807  
  2808  where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the
  2809  second images, respectively. The result of this function may be passed further to
  2810  #decomposeEssentialMat or  #recoverPose to recover the relative pose between cameras.
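
        A minimal usage sketch; K1, d1, K2, d2, points1 and points2 are assumed to come from a
        previous calibration and feature-matching step:
        @code
            Mat mask;
            Mat E = findEssentialMat(points1, points2, K1, d1, K2, d2,
                                     RANSAC, 0.999, 1.0, mask);
        @endcode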
  2811   */
  2812  CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
  2813                                   InputArray cameraMatrix1, InputArray distCoeffs1,
  2814                                   InputArray cameraMatrix2, InputArray distCoeffs2,
  2815                                   int method = RANSAC,
  2816                                   double prob = 0.999, double threshold = 1.0,
  2817                                   OutputArray mask = noArray() );
  2818  
  2819  
  2820  CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2,
  2821                        InputArray cameraMatrix1, InputArray cameraMatrix2,
  2822                        InputArray dist_coeff1, InputArray dist_coeff2, OutputArray mask,
  2823                        const UsacParams &params);
  2824  
  2825  /** @brief Decompose an essential matrix to possible rotations and translation.
  2826  
  2827  @param E The input essential matrix.
  2828  @param R1 One possible rotation matrix.
  2829  @param R2 Another possible rotation matrix.
  2830  @param t One possible translation.
  2831  
  2832  This function decomposes the essential matrix E using singular value decomposition @cite HartleyZ00. In
  2833  general, four possible poses exist for the decomposition of E. They are \f$[R_1, t]\f$,
  2834  \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$.
  2835  
  2836  If E gives the epipolar constraint \f$[p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\f$ between the image
  2837  points \f$p_1\f$ in the first image and \f$p_2\f$ in second image, then any of the tuples
  2838  \f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$ is a change of basis from the first
  2839  camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one
  2840  can only get the direction of the translation. For this reason, the translation t is returned with
  2841  unit length.
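
        A minimal usage sketch, assuming E comes from a previous #findEssentialMat call:
        @code
            Mat R1, R2, t;
            decomposeEssentialMat(E, R1, R2, t);
            // candidate poses: [R1, t], [R1, -t], [R2, t], [R2, -t];
            // a cheirality check (see recoverPose) selects the physically valid one
        @endcode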
  2842   */
  2843  CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t );
  2844  
  2845  /** @brief Recovers the relative camera rotation and the translation from an estimated essential
  2846  matrix and the corresponding points in two images, using a cheirality check. Returns the number of
  2847  inliers that pass the check.
  2848  
  2849  @param E The input essential matrix.
  2850  @param points1 Array of N 2D points from the first image. The point coordinates should be
  2851  floating-point (single or double precision).
  2852  @param points2 Array of the second image points of the same size and format as points1 .
  2853  @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  2854  Note that this function assumes that points1 and points2 are feature points from cameras with the
  2855  same camera intrinsic matrix.
  2856  @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
  2857  that performs a change of basis from the first camera's coordinate system to the second camera's
  2858  coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
  2859  described below.
  2860  @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
  2861  therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
  2862  length.
  2863  @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
  2864  inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
  2865  recover pose. In the output mask, only the inliers which pass the cheirality check are marked.
  2866  
  2867  This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies
  2868  possible pose hypotheses by doing cheirality check. The cheirality check means that the
  2869  triangulated 3D points should have positive depth. Some details can be found in @cite Nister03.
  2870  
  2871  This function can be used to process the output E and mask from @ref findEssentialMat. In this
  2872  scenario, points1 and points2 are the same input for #findEssentialMat :
  2873  @code
  2874      // Example. Estimation of the essential matrix using the RANSAC algorithm
  2875      int point_count = 100;
  2876      vector<Point2f> points1(point_count);
  2877      vector<Point2f> points2(point_count);
  2878  
  2879      // initialize the points here ...
  2880      for( int i = 0; i < point_count; i++ )
  2881      {
  2882          points1[i] = ...;
  2883          points2[i] = ...;
  2884      }
  2885  
  2886      // camera matrix with both focal lengths = 1, and principal point = (0, 0)
  2887      Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
  2888  
  2889      Mat E, R, t, mask;
  2890  
  2891      E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
  2892      recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
  2893  @endcode
  2894   */
  2895  CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
  2896                              InputArray cameraMatrix, OutputArray R, OutputArray t,
  2897                              InputOutputArray mask = noArray() );
  2898  
  2899  /** @overload
  2900  @param E The input essential matrix.
  2901  @param points1 Array of N 2D points from the first image. The point coordinates should be
  2902  floating-point (single or double precision).
  2903  @param points2 Array of the second image points of the same size and format as points1 .
  2904  @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
  2905  that performs a change of basis from the first camera's coordinate system to the second camera's
  2906  coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
  2907  description below.
  2908  @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
  2909  therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
  2910  length.
  2911  @param focal Focal length of the camera. Note that this function assumes that points1 and points2
  2912  are feature points from cameras with the same focal length and principal point.
  2913  @param pp principal point of the camera.
  2914  @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
  2915  inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
  2916  recover pose. In the output mask, only the inliers which pass the cheirality check are marked.
  2917  
  2918  This function differs from the one above in that it computes the camera intrinsic matrix from the focal length and
  2919  principal point:
  2920  
  2921  \f[A =
  2922  \begin{bmatrix}
  2923  f & 0 & x_{pp}  \\
  2924  0 & f & y_{pp}  \\
  2925  0 & 0 & 1
  2926  \end{bmatrix}\f]
  2927   */
  2928  CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
  2929                              OutputArray R, OutputArray t,
  2930                              double focal = 1.0, Point2d pp = Point2d(0, 0),
  2931                              InputOutputArray mask = noArray() );
  2932  
  2933  /** @overload
  2934  @param E The input essential matrix.
  2935  @param points1 Array of N 2D points from the first image. The point coordinates should be
  2936  floating-point (single or double precision).
  2937  @param points2 Array of the second image points of the same size and format as points1.
  2938  @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ .
  2939  Note that this function assumes that points1 and points2 are feature points from cameras with the
  2940  same camera intrinsic matrix.
  2941  @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple
  2942  that performs a change of basis from the first camera's coordinate system to the second camera's
  2943  coordinate system. Note that, in general, t can not be used for this tuple, see the parameter
  2944  description below.
  2945  @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and
  2946  therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit
  2947  length.
  2948  @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite
  2949  points).
  2950  @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks
  2951  inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
  2952  recover pose. In the output mask, only the inliers which pass the cheirality check are marked.
  2953  @param triangulatedPoints 3D points which were reconstructed by triangulation.
  2954  
  2955  This function differs from the one above in that it outputs the triangulated 3D points that are used for
  2956  the cheirality check.
  2957   */
  2958  CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2,
  2959                              InputArray cameraMatrix, OutputArray R, OutputArray t, double distanceThresh, InputOutputArray mask = noArray(),
  2960                              OutputArray triangulatedPoints = noArray());
  2961  
  2962  /** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
  2963  
  2964  @param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
  2965  vector\<Point2f\> .
  2966  @param whichImage Index of the image (1 or 2) that contains the points .
  2967  @param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify .
  2968  @param lines Output vector of the epipolar lines corresponding to the points in the other image.
  2969  Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ .
  2970  
  2971  For every point in one of the two images of a stereo pair, the function finds the equation of the
  2972  corresponding epipolar line in the other image.
  2973  
  2974  From the fundamental matrix definition (see #findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second
  2975  image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as:
  2976  
  2977  \f[l^{(2)}_i = F p^{(1)}_i\f]
  2978  
  2979  And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as:
  2980  
  2981  \f[l^{(1)}_i = F^T p^{(2)}_i\f]
  2982  
  2983  Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ .
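
        A minimal usage sketch, assuming F comes from #findFundamentalMat and pts1 holds points
        detected in the first image:
        @code
            std::vector<Point2f> pts1; // filled elsewhere
            std::vector<Vec3f> lines2; // epilines in the second image
            computeCorrespondEpilines(pts1, 1, F, lines2);
            // lines2[i] = (a, b, c) encodes the line a*x + b*y + c = 0 with a^2 + b^2 = 1
        @endcode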
  2984   */
  2985  CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage,
  2986                                               InputArray F, OutputArray lines );
  2987  
  2988  /** @brief This function reconstructs 3-dimensional points (in homogeneous coordinates) by using
  2989  their observations with a stereo camera.
  2990  
  2991  @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points
  2992  given in the world's coordinate system into the first image.
  2993  @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points
  2994  given in the world's coordinate system into the second image.
  2995  @param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
  2996  it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
  2997  @param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
  2998  version, it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
  2999  @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are
  3000  returned in the world's coordinate system.
  3001  
  3002  @note
  3003     Keep in mind that all input data should be of float type in order for this function to work.
  3004  
  3005  @note
  3006     If the projection matrices from @ref stereoRectify are used, then the returned points are
  3007     represented in the first camera's rectified coordinate system.
  3008  
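        A minimal usage sketch, assuming P1 and P2 are the 3x4 projection matrices (e.g. from
        #stereoRectify) and pts1/pts2 are matched points stored as vector<Point2f>:
        @code
            Mat points4D;
            triangulatePoints(P1, P2, pts1, pts2, points4D);      // 4xN, CV_32F
            Mat points3D;
            convertPointsFromHomogeneous(points4D.t(), points3D); // Nx1, CV_32FC3
        @endcode
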
  3009  @sa
  3010     reprojectImageTo3D
  3011   */
  3012  CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2,
  3013                                       InputArray projPoints1, InputArray projPoints2,
  3014                                       OutputArray points4D );
  3015  
  3016  /** @brief Refines coordinates of corresponding points.
  3017  
  3018  @param F 3x3 fundamental matrix.
  3019  @param points1 1xN array containing the first set of points.
  3020  @param points2 1xN array containing the second set of points.
  3021  @param newPoints1 The optimized points1.
  3022  @param newPoints2 The optimized points2.
  3023  
  3024  The function implements the Optimal Triangulation Method (see Multiple View Geometry @cite HartleyZ00 for details).
  3025  For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it
  3026  computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric
  3027  error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the
  3028  geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint
  3029  \f$newPoints2^T * F * newPoints1 = 0\f$ .
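
        A minimal usage sketch, assuming F, pts1 and pts2 are prepared elsewhere (the point arrays
        must be two-channel, floating-point and of size 1xN):
        @code
            Mat newPts1, newPts2;
            correctMatches(F, pts1, pts2, newPts1, newPts2);
            // newPts1/newPts2 satisfy the epipolar constraint exactly and can be triangulated
        @endcode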
  3030   */
  3031  CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2,
  3032                                    OutputArray newPoints1, OutputArray newPoints2 );
  3033  
  3034  /** @brief Filters off small noise blobs (speckles) in the disparity map
  3035  
  3036  @param img The input 16-bit signed disparity image
  3037  @param newVal The disparity value used to paint-off the speckles
  3038  @param maxSpeckleSize The maximum speckle size to consider it a speckle. Larger blobs are not
  3039  affected by the algorithm
  3040  @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same
  3041  blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
  3042  disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
  3043  account when specifying this parameter value.
  3044  @param buf The optional temporary buffer to avoid memory allocation within the function.
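
        A minimal usage sketch, assuming disp is a CV_16S fixed-point disparity map from StereoBM or
        StereoSGBM (values scaled by 16):
        @code
            // invalidate (paint with 0) blobs smaller than 50 pixels whose internal
            // disparity varies by more than 1, i.e. 16 in fixed-point units
            filterSpeckles(disp, 0, 50, 16);
        @endcode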
  3045   */
  3046  CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
  3047                                    int maxSpeckleSize, double maxDiff,
  3048                                    InputOutputArray buf = noArray() );
  3049  
  3050  //! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by #stereoRectify)
  3051  CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
  3052                                          int minDisparity, int numberOfDisparities,
  3053                                          int blockSize );
  3054  
  3055  //! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
  3056  CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
  3057                                       int minDisparity, int numberOfDisparities,
  3058                                       int disp12MaxDisp = 1 );
  3059  
  3060  /** @brief Reprojects a disparity image to 3D space.
  3061  
  3062  @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
  3063  floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no
  3064  fractional bits. If the disparity is 16-bit signed format, as computed by @ref StereoBM or
  3065  @ref StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before
  3066  being used here.
  3067  @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of
  3068  _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one
  3069  uses Q obtained by @ref stereoRectify, then the returned points are represented in the first
  3070  camera's rectified coordinate system.
  3071  @param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with
  3072  @ref stereoRectify.
  3073  @param handleMissingValues Indicates whether the function should handle missing values (i.e.
  3074  points where the disparity was not computed). If handleMissingValues=true, then pixels with the
  3075  minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
  3076  to 3D points with a very large Z value (currently set to 10000).
  3077  @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F
  3078  depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
  3079  
  3080  The function transforms a single-channel disparity map to a 3-channel image representing a 3D
  3081  surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it
  3082  computes:
  3083  
  3084  \f[\begin{bmatrix}
  3085  X \\
  3086  Y \\
  3087  Z \\
  3088  W
  3089  \end{bmatrix} = Q \begin{bmatrix}
  3090  x \\
  3091  y \\
  3092  \texttt{disparity} (x,y) \\
  3093  z
  3094  \end{bmatrix}.\f]
  3095  
  3096  @sa
  3097     To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform.
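
        A minimal usage sketch, assuming disp16 is a CV_16S fixed-point disparity map and Q comes
        from a previous #stereoRectify call:
        @code
            Mat dispF, xyz;
            disp16.convertTo(dispF, CV_32F, 1.0 / 16.0); // remove the 4 fractional bits
            reprojectImageTo3D(dispF, xyz, Q, true);     // xyz is CV_32FC3
        @endcode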
  3098   */
  3099  CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity,
  3100                                        OutputArray _3dImage, InputArray Q,
  3101                                        bool handleMissingValues = false,
  3102                                        int ddepth = -1 );
  3103  
  3104  /** @brief Calculates the Sampson Distance between two points.
  3105  
  3106  The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as:
  3107  \f[
  3108  sd( \texttt{pt1} , \texttt{pt2} )=
  3109  \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2}
  3110  {((\texttt{F} \cdot \texttt{pt1})(0))^2 +
  3111  ((\texttt{F} \cdot \texttt{pt1})(1))^2 +
  3112  ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 +
  3113  ((\texttt{F}^t \cdot \texttt{pt2})(1))^2}
  3114  \f]
  3115  The fundamental matrix may be calculated using the #findFundamentalMat function. See @cite HartleyZ00 11.4.3 for details.
  3116  @param pt1 first homogeneous 2d point
  3117  @param pt2 second homogeneous 2d point
  3118  @param F fundamental matrix
  3119  @return The computed Sampson distance.
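
        A minimal usage sketch with illustrative coordinates; F is assumed to come from
        #findFundamentalMat:
        @code
            Vec3d pt1(235.0, 412.0, 1.0), pt2(241.5, 409.0, 1.0); // homogeneous 2D points
            double sd = sampsonDistance(pt1, pt2, F);
        @endcode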
  3120  */
  3121  CV_EXPORTS_W double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F);
  3122  
  3123  /** @brief Computes an optimal affine transformation between two 3D point sets.
  3124  
  3125  It computes
  3126  \f[
  3127  \begin{bmatrix}
  3128  x\\
  3129  y\\
  3130  z\\
  3131  \end{bmatrix}
  3132  =
  3133  \begin{bmatrix}
  3134  a_{11} & a_{12} & a_{13}\\
  3135  a_{21} & a_{22} & a_{23}\\
  3136  a_{31} & a_{32} & a_{33}\\
  3137  \end{bmatrix}
  3138  \begin{bmatrix}
  3139  X\\
  3140  Y\\
  3141  Z\\
  3142  \end{bmatrix}
  3143  +
  3144  \begin{bmatrix}
  3145  b_1\\
  3146  b_2\\
  3147  b_3\\
  3148  \end{bmatrix}
  3149  \f]
  3150  
  3151  @param src First input 3D point set containing \f$(X,Y,Z)\f$.
  3152  @param dst Second input 3D point set containing \f$(x,y,z)\f$.
  3153  @param out Output 3D affine transformation matrix \f$3 \times 4\f$ of the form
  3154  \f[
  3155  \begin{bmatrix}
  3156  a_{11} & a_{12} & a_{13} & b_1\\
  3157  a_{21} & a_{22} & a_{23} & b_2\\
  3158  a_{31} & a_{32} & a_{33} & b_3\\
  3159  \end{bmatrix}
  3160  \f]
  3161  @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
  3162  @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
  3163  an inlier.
  3164  @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
  3165  between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
  3166  significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
  3167  
  3168  The function estimates an optimal 3D affine transformation between two 3D point sets using the
  3169  RANSAC algorithm.
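
        A minimal usage sketch, assuming src and dst are corresponding 3D point sets gathered
        elsewhere:
        @code
            std::vector<Point3f> src, dst; // filled elsewhere, same size
            Mat affine, inliers;
            int ok = estimateAffine3D(src, dst, affine, inliers, 3.0, 0.99);
            // on success, affine is the 3x4 [A | b] matrix described above
        @endcode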
  3170   */
  3171  CV_EXPORTS_W  int estimateAffine3D(InputArray src, InputArray dst,
  3172                                     OutputArray out, OutputArray inliers,
  3173                                     double ransacThreshold = 3, double confidence = 0.99);
  3174  
  3175  /** @brief Computes an optimal affine transformation between two 3D point sets.
  3176  
  3177  It computes \f$R, s, t\f$ minimizing \f$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2\f$
  3178  where \f$R\f$ is a 3x3 rotation matrix, \f$t\f$ is a 3x1 translation vector and \f$s\f$ is a
  3179  scalar size value. This is an implementation of the algorithm by Umeyama @cite umeyama1991least .
  3180  The estimated transform is a similarity transform (rigid motion plus uniform scaling), a subclass
  3181  of affine transformations with 7 degrees of freedom. The paired point sets need to comprise at
  3182  least 3 points each.
  3183  
  3184  @param src First input 3D point set.
  3185  @param dst Second input 3D point set.
  3186  @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
  3187  Otherwise the pointed-to variable will be set to the optimal scale.
  3188  @param force_rotation If true, the returned rotation will never be a reflection.
  3189  This might be unwanted, e.g. when optimizing a transform between a right- and a
  3190  left-handed coordinate system.
  3191  @return 3D affine transformation matrix \f$3 \times 4\f$ of the form
  3192  \f[T =
  3193  \begin{bmatrix}
  3194  R & t\\
  3195  \end{bmatrix}
  3196  \f]
  3197  
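        A minimal sketch with synthetic data (a pure translation, so the expected result is R = I,
        s = 1, t = (2, 0, 0)):
        @code
            std::vector<Point3d> src = { {0,0,0}, {1,0,0}, {0,1,0}, {0,0,1} };
            std::vector<Point3d> dst;
            for (const Point3d& p : src) dst.push_back(p + Point3d(2, 0, 0));
            double scale = 0.0;
            Mat T = estimateAffine3D(src, dst, &scale, true); // 3x4 [R | t]
        @endcode
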
  3198   */
  3199  CV_EXPORTS_W   cv::Mat estimateAffine3D(InputArray src, InputArray dst,
  3200                                          CV_OUT double* scale = nullptr, bool force_rotation = true);
  3201  
  3202  /** @brief Computes an optimal translation between two 3D point sets.
  3203   *
  3204   * It computes
  3205   * \f[
  3206   * \begin{bmatrix}
  3207   * x\\
  3208   * y\\
  3209   * z\\
  3210   * \end{bmatrix}
  3211   * =
  3212   * \begin{bmatrix}
  3213   * X\\
  3214   * Y\\
  3215   * Z\\
  3216   * \end{bmatrix}
  3217   * +
  3218   * \begin{bmatrix}
  3219   * b_1\\
  3220   * b_2\\
  3221   * b_3\\
  3222   * \end{bmatrix}
  3223   * \f]
  3224   *
  3225   * @param src First input 3D point set containing \f$(X,Y,Z)\f$.
  3226   * @param dst Second input 3D point set containing \f$(x,y,z)\f$.
  3227   * @param out Output 3D translation vector \f$3 \times 1\f$ of the form
  3228   * \f[
  3229   * \begin{bmatrix}
  3230   * b_1 \\
  3231   * b_2 \\
  3232   * b_3 \\
  3233   * \end{bmatrix}
  3234   * \f]
  3235   * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
  3236   * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as
  3237   * an inlier.
  3238   * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
  3239   * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
  3240   * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
  3241   *
  3242   * The function estimates an optimal 3D translation between two 3D point sets using the
  3243   * RANSAC algorithm.
  3244   *  */
  3245  CV_EXPORTS_W  int estimateTranslation3D(InputArray src, InputArray dst,
  3246                                          OutputArray out, OutputArray inliers,
  3247                                          double ransacThreshold = 3, double confidence = 0.99);
  3248  
  3249  /** @brief Computes an optimal affine transformation between two 2D point sets.
  3250  
  3251  It computes
  3252  \f[
  3253  \begin{bmatrix}
  3254  x\\
  3255  y\\
  3256  \end{bmatrix}
  3257  =
  3258  \begin{bmatrix}
  3259  a_{11} & a_{12}\\
  3260  a_{21} & a_{22}\\
  3261  \end{bmatrix}
  3262  \begin{bmatrix}
  3263  X\\
  3264  Y\\
  3265  \end{bmatrix}
  3266  +
  3267  \begin{bmatrix}
  3268  b_1\\
  3269  b_2\\
  3270  \end{bmatrix}
  3271  \f]
  3272  
  3273  @param from First input 2D point set containing \f$(X,Y)\f$.
  3274  @param to Second input 2D point set containing \f$(x,y)\f$.
  3275  @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier).
  3276  @param method Robust method used to compute transformation. The following methods are possible:
  3277  -   @ref RANSAC - RANSAC-based robust method
  3278  -   @ref LMEDS - Least-Median robust method
  3279  RANSAC is the default method.
  3280  @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
  3281  a point as an inlier. Applies only to RANSAC.
  3282  @param maxIters The maximum number of robust method iterations.
  3283  @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
  3284  between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
  3285  significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
  3286  @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
  3287  Passing 0 will disable refining, so the output matrix will be the output of the robust method.
  3288  
  3289  @return Output 2D affine transformation matrix \f$2 \times 3\f$ or empty matrix if transformation
  3290  could not be estimated. The returned matrix has the following form:
  3291  \f[
  3292  \begin{bmatrix}
  3293  a_{11} & a_{12} & b_1\\
  3294  a_{21} & a_{22} & b_2\\
  3295  \end{bmatrix}
  3296  \f]
  3297  
  3298  The function estimates an optimal 2D affine transformation between two 2D point sets using the
  3299  selected robust algorithm.
  3300  
  3301  The computed transformation is then refined further (using only inliers) with the
  3302  Levenberg-Marquardt method to reduce the re-projection error even more.
  3303  
  3304  @note
  3305  The RANSAC method can handle practically any ratio of outliers but needs a threshold to
  3306  distinguish inliers from outliers. The method LMeDS does not need any threshold, but it works
  3307  correctly only when there are more than 50% inliers.
  3308  
  3309  @sa estimateAffinePartial2D, getAffineTransform
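
        A minimal usage sketch, assuming from and to are matched 2D point sets:
        @code
            std::vector<Point2f> from, to; // filled elsewhere, same size
            std::vector<uchar> inliers;
            Mat A = estimateAffine2D(from, to, inliers, RANSAC, 3.0);
            if (!A.empty())
            {
                // A is the 2x3 matrix [a11 a12 b1; a21 a22 b2] described above
            }
        @endcode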
  3310  */
  3311  CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray from, InputArray to, OutputArray inliers = noArray(),
  3312                                    int method = RANSAC, double ransacReprojThreshold = 3,
  3313                                    size_t maxIters = 2000, double confidence = 0.99,
  3314                                    size_t refineIters = 10);
  3315  
  3316  
  3317  CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray pts1, InputArray pts2, OutputArray inliers,
  3318                       const UsacParams &params);
  3319  
  3320  /** @brief Computes an optimal limited affine transformation with 4 degrees of freedom between
  3321  two 2D point sets.
  3322  
  3323  @param from First input 2D point set.
  3324  @param to Second input 2D point set.
  3325  @param inliers Output vector indicating which points are inliers.
  3326  @param method Robust method used to compute transformation. The following methods are possible:
  3327  -   @ref RANSAC - RANSAC-based robust method
  3328  -   @ref LMEDS - Least-Median robust method
  3329  RANSAC is the default method.
  3330  @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider
  3331  a point as an inlier. Applies only to RANSAC.
  3332  @param maxIters The maximum number of robust method iterations.
  3333  @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything
  3334  between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
  3335  significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
  3336  @param refineIters Maximum number of iterations of the refining algorithm (Levenberg-Marquardt).
  3337  Passing 0 will disable refining, so the output matrix will be the output of the robust method.
  3338  
  3339  @return Output 2D affine transformation (4 degrees of freedom) matrix \f$2 \times 3\f$ or
  3340  empty matrix if transformation could not be estimated.
  3341  
  3342  The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to
  3343  combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust
  3344  estimation.
  3345  
  3346  The computed transformation is then refined further (using only inliers) with the
  3347  Levenberg-Marquardt method to reduce the re-projection error even more.
  3348  
  3349  Estimated transformation matrix is:
  3350  \f[ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\
  3351                  \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y
  3352  \end{bmatrix} \f]
  3353  Where \f$ \theta \f$ is the rotation angle, \f$ s \f$ the scaling factor and \f$ t_x, t_y \f$ are
  3354  translations in \f$ x, y \f$ axes respectively.
  3355  
  3356  @note
  3357  The RANSAC method can handle practically any ratio of outliers but needs a threshold to
  3358  distinguish inliers from outliers. The method LMeDS does not need any threshold, but it works
  3359  correctly only when there are more than 50% inliers.
  3360  
  3361  @sa estimateAffine2D, getAffineTransform
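
        A minimal sketch showing how the rotation angle and uniform scale can be read back from the
        returned matrix using the relations above (from and to as in #estimateAffine2D):
        @code
            Mat M = estimateAffinePartial2D(from, to); // 2x3 CV_64F, empty on failure
            if (!M.empty())
            {
                double theta = std::atan2(M.at<double>(1, 0), M.at<double>(0, 0));
                double s     = std::hypot(M.at<double>(0, 0), M.at<double>(1, 0));
            }
        @endcode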
  3362  */
  3363  CV_EXPORTS_W cv::Mat estimateAffinePartial2D(InputArray from, InputArray to, OutputArray inliers = noArray(),
  3364                                    int method = RANSAC, double ransacReprojThreshold = 3,
  3365                                    size_t maxIters = 2000, double confidence = 0.99,
  3366                                    size_t refineIters = 10);
  3367  
  3368  /** @example samples/cpp/tutorial_code/features2D/Homography/decompose_homography.cpp
  3369  An example program with homography decomposition.
  3370  
  3371  Check @ref tutorial_homography "the corresponding tutorial" for more details.
  3372  */
  3373  
  3374  /** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
  3375  
  3376  @param H The input homography matrix between two images.
  3377  @param K The input camera intrinsic matrix.
  3378  @param rotations Array of rotation matrices.
  3379  @param translations Array of translation matrices.
  3380  @param normals Array of plane normal matrices.
  3381  
  3382  This function extracts relative camera motion between two views of a planar object and returns up to
  3383  four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of
  3384  the homography matrix H is described in detail in @cite Malis.
  3385  
  3386  If the homography H, induced by the plane, gives the constraint
  3387  \f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f] on the source image points
  3388  \f$p_i\f$ and the destination image points \f$p'_i\f$, then the tuple of rotations[k] and
  3389  translations[k] is a change of basis from the source camera's coordinate system to the destination
  3390  camera's coordinate system. However, by decomposing H, one can only get the translation normalized
  3391  by the (typically unknown) depth of the scene, i.e. its direction but with normalized length.
  3392  
  3393  If point correspondences are available, at least two solutions may further be invalidated, by
  3394  applying positive depth constraint, i.e. all points must be in front of the camera.
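
        A minimal usage sketch, assuming H comes from #findHomography and K from camera calibration:
        @code
            std::vector<Mat> Rs, ts, normals;
            int n = decomposeHomographyMat(H, K, Rs, ts, normals);
            for (int i = 0; i < n; i++)
            {
                // Rs[i], ts[i], normals[i] form one candidate solution; see
                // filterHomographyDecompByVisibleRefpoints for pruning them
            }
        @endcode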
  3395   */
  3396  CV_EXPORTS_W int decomposeHomographyMat(InputArray H,
  3397                                          InputArray K,
  3398                                          OutputArrayOfArrays rotations,
  3399                                          OutputArrayOfArrays translations,
  3400                                          OutputArrayOfArrays normals);
  3401  
  3402  /** @brief Filters homography decompositions based on additional information.
  3403  
  3404  @param rotations Vector of rotation matrices.
  3405  @param normals Vector of plane normal matrices.
  3406  @param beforePoints Vector of (rectified) visible reference points before the homography is applied
  3407  @param afterPoints Vector of (rectified) visible reference points after the homography is applied
  3408  @param possibleSolutions Vector of int indices representing the viable solution set after filtering
  3409  @param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function
  3410  
  3411  This function is intended to filter the output of the #decomposeHomographyMat based on additional
  3412  information as described in @cite Malis . The summary of the method: the #decomposeHomographyMat function
  3413  returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the
  3414  sets of points visible in the camera frame before and after the homography transformation is applied,
  3415  we can determine which are the true potential solutions and which are the opposites by verifying which
  3416  homographies are consistent with all visible reference points being in front of the camera. The inputs
  3417  are left unchanged; the filtered solution set is returned as indices into the existing one.
  3418  
  3419  */
  3420  CV_EXPORTS_W void filterHomographyDecompByVisibleRefpoints(InputArrayOfArrays rotations,
  3421                                                             InputArrayOfArrays normals,
  3422                                                             InputArray beforePoints,
  3423                                                             InputArray afterPoints,
  3424                                                             OutputArray possibleSolutions,
  3425                                                             InputArray pointsMask = noArray());
  3426  
  3427  /** @brief The base class for stereo correspondence algorithms.
  3428   */
  3429  class CV_EXPORTS_W StereoMatcher : public Algorithm
  3430  {
  3431  public:
  3432      enum { DISP_SHIFT = 4,
  3433             DISP_SCALE = (1 << DISP_SHIFT)
  3434           };
  3435  
  3436      /** @brief Computes disparity map for the specified stereo pair
  3437  
  3438      @param left Left 8-bit single-channel image.
  3439      @param right Right image of the same size and the same type as the left one.
  3440      @param disparity Output disparity map. It has the same size as the input images. Some algorithms,
  3441      like StereoBM or StereoSGBM, compute a 16-bit fixed-point disparity map (where each disparity value
  3442      has 4 fractional bits), whereas other algorithms output a 32-bit floating-point disparity map.
  3443       */
  3444      CV_WRAP virtual void compute( InputArray left, InputArray right,
  3445                                    OutputArray disparity ) = 0;
  3446  
  3447      CV_WRAP virtual int getMinDisparity() const = 0;
  3448      CV_WRAP virtual void setMinDisparity(int minDisparity) = 0;
  3449  
  3450      CV_WRAP virtual int getNumDisparities() const = 0;
  3451      CV_WRAP virtual void setNumDisparities(int numDisparities) = 0;
  3452  
  3453      CV_WRAP virtual int getBlockSize() const = 0;
  3454      CV_WRAP virtual void setBlockSize(int blockSize) = 0;
  3455  
  3456      CV_WRAP virtual int getSpeckleWindowSize() const = 0;
  3457      CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0;
  3458  
  3459      CV_WRAP virtual int getSpeckleRange() const = 0;
  3460      CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0;
  3461  
  3462      CV_WRAP virtual int getDisp12MaxDiff() const = 0;
  3463      CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0;
  3464  };
  3465  
  3466  
  3467  /** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and
  3468  contributed to OpenCV by K. Konolige.
  3469   */
  3470  class CV_EXPORTS_W StereoBM : public StereoMatcher
  3471  {
  3472  public:
  3473      enum { PREFILTER_NORMALIZED_RESPONSE = 0,
  3474             PREFILTER_XSOBEL              = 1
  3475           };
  3476  
  3477      CV_WRAP virtual int getPreFilterType() const = 0;
  3478      CV_WRAP virtual void setPreFilterType(int preFilterType) = 0;
  3479  
  3480      CV_WRAP virtual int getPreFilterSize() const = 0;
  3481      CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0;
  3482  
  3483      CV_WRAP virtual int getPreFilterCap() const = 0;
  3484      CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
  3485  
  3486      CV_WRAP virtual int getTextureThreshold() const = 0;
  3487      CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0;
  3488  
  3489      CV_WRAP virtual int getUniquenessRatio() const = 0;
  3490      CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
  3491  
  3492      CV_WRAP virtual int getSmallerBlockSize() const = 0;
  3493      CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0;
  3494  
  3495      CV_WRAP virtual Rect getROI1() const = 0;
  3496      CV_WRAP virtual void setROI1(Rect roi1) = 0;
  3497  
  3498      CV_WRAP virtual Rect getROI2() const = 0;
  3499      CV_WRAP virtual void setROI2(Rect roi2) = 0;
  3500  
  3501      /** @brief Creates StereoBM object
  3502  
  3503      @param numDisparities the disparity search range. For each pixel, the algorithm will find the best
  3504      disparity from 0 (default minimum disparity) to numDisparities. The search range can then be
  3505      shifted by changing the minimum disparity.
  3506      @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd
  3507      (as the block is centered at the current pixel). Larger block size implies smoother, though less
  3508      accurate disparity map. Smaller block size gives more detailed disparity map, but there is higher
  3509      chance for algorithm to find a wrong correspondence.
  3510  
  3511      The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for
  3512      a specific stereo pair.
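
            A minimal usage sketch, assuming leftRectified/rightRectified are a rectified 8-bit
            grayscale pair:
            @code
                Ptr<StereoBM> bm = StereoBM::create(96, 15);
                Mat disp16, disp32;
                bm->compute(leftRectified, rightRectified, disp16); // CV_16S, scaled by 16
                disp16.convertTo(disp32, CV_32F, 1.0 / StereoBM::DISP_SCALE);
            @endcode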
  3513       */
  3514      CV_WRAP static Ptr<StereoBM> create(int numDisparities = 0, int blockSize = 21);
  3515  };
  3516  
  3517  /** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original
  3518  one as follows:
  3519  
  3520  -   By default, the algorithm is single-pass, which means that you consider only 5 directions
  3521  instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the
  3522  algorithm but beware that it may consume a lot of memory.
  3523  -   The algorithm matches blocks, not individual pixels. Though, setting blockSize=1 reduces the
  3524  blocks to single pixels.
  3525  -   Mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi
  3526  sub-pixel metric from @cite BT98 is used. Though, the color images are supported as well.
  3527  -   Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for
  3528  example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness
  3529  check, quadratic interpolation and speckle filtering).
  3530  
  3531  @note
  3532     -   (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found
  3533          at opencv_source_code/samples/python/stereo_match.py
  3534   */
  3535  class CV_EXPORTS_W StereoSGBM : public StereoMatcher
  3536  {
  3537  public:
  3538      enum
  3539      {
  3540          MODE_SGBM = 0,
  3541          MODE_HH   = 1,
  3542          MODE_SGBM_3WAY = 2,
  3543          MODE_HH4  = 3
  3544      };
  3545  
  3546      CV_WRAP virtual int getPreFilterCap() const = 0;
  3547      CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0;
  3548  
  3549      CV_WRAP virtual int getUniquenessRatio() const = 0;
  3550      CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0;
  3551  
  3552      CV_WRAP virtual int getP1() const = 0;
  3553      CV_WRAP virtual void setP1(int P1) = 0;
  3554  
  3555      CV_WRAP virtual int getP2() const = 0;
  3556      CV_WRAP virtual void setP2(int P2) = 0;
  3557  
  3558      CV_WRAP virtual int getMode() const = 0;
  3559      CV_WRAP virtual void setMode(int mode) = 0;
  3560  
  3561      /** @brief Creates StereoSGBM object
  3562  
  3563      @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes
  3564      rectification algorithms can shift images, so this parameter needs to be adjusted accordingly.
  3565      @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than
  3566      zero. In the current implementation, this parameter must be divisible by 16.
  3567      @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be
  3568      somewhere in the 3..11 range.
  3569      @param P1 The first parameter controlling the disparity smoothness. See below.
  3570      @param P2 The second parameter controlling the disparity smoothness. The larger the values are,
  3571      the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
  3572      between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
  3573      pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
  3574      P1 and P2 values are shown (like 8\*number_of_image_channels\*blockSize\*blockSize and
  3575      32\*number_of_image_channels\*blockSize\*blockSize , respectively).
  3576      @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
  3577      disparity check. Set it to a non-positive value to disable the check.
  3578      @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first
  3579      computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval.
  3580      The result values are passed to the Birchfield-Tomasi pixel cost function.
  3581      @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function
  3582      value should "win" the second best value to consider the found match correct. Normally, a value
  3583      within the 5-15 range is good enough.
  3584      @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles
  3585      and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the
  3586      50-200 range.
  3587      @param speckleRange Maximum disparity variation within each connected component. If you do speckle
  3588      filtering, set the parameter to a positive value; it will be implicitly multiplied by 16.
  3589      Normally, 1 or 2 is good enough.
  3590      @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming
  3591      algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and
  3592      huge for HD-size pictures. By default, it is set to StereoSGBM::MODE_SGBM.
  3593  
  3594      Calling create() with the default arguments initializes StereoSGBM with all the default parameters,
  3595      so you only have to set StereoSGBM::numDisparities at minimum. The remaining arguments enable you
  3596      to set each parameter to a custom value.
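
            A minimal sketch using the P1/P2 heuristic quoted above, assuming
            leftRectified/rightRectified are a rectified grayscale pair (cn = 1):
            @code
                int blockSize = 5, cn = 1;
                Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 128, blockSize,
                                                          8 * cn * blockSize * blockSize,
                                                          32 * cn * blockSize * blockSize);
                Mat disp16;
                sgbm->compute(leftRectified, rightRectified, disp16); // CV_16S, scaled by 16
            @endcode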
  3597       */
  3598      CV_WRAP static Ptr<StereoSGBM> create(int minDisparity = 0, int numDisparities = 16, int blockSize = 3,
  3599                                            int P1 = 0, int P2 = 0, int disp12MaxDiff = 0,
  3600                                            int preFilterCap = 0, int uniquenessRatio = 0,
  3601                                            int speckleWindowSize = 0, int speckleRange = 0,
  3602                                            int mode = StereoSGBM::MODE_SGBM);
  3603  };
  3604  
  3605  
  3606  //! cv::undistort mode
  3607  enum UndistortTypes
  3608  {
  3609      PROJ_SPHERICAL_ORTHO  = 0,
  3610      PROJ_SPHERICAL_EQRECT = 1
  3611  };
  3612  
  3613  /** @brief Transforms an image to compensate for lens distortion.
  3614  
  3615  The function transforms an image to compensate for radial and tangential lens distortion.
  3616  
  3617  The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap
  3618  (with bilinear interpolation). See the former function for details of the transformation being
  3619  performed.
  3620  
  3621  Those pixels in the destination image for which there are no corresponding pixels in the source
  3622  image are filled with zeros (black color).
  3623  
  3624  A particular subset of the source image that will be visible in the corrected image can be regulated
  3625  by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate
  3626  newCameraMatrix depending on your requirements.
  3627  
  3628  The camera matrix and the distortion parameters can be determined using #calibrateCamera. If
  3629  the resolution of images is different from the resolution used at the calibration stage, \f$f_x,
  3630  f_y, c_x\f$ and \f$c_y\f$ need to be scaled accordingly, while the distortion coefficients remain
  3631  the same.
  3632  
  3633  @param src Input (distorted) image.
  3634  @param dst Output (corrected) image that has the same size and type as src .
  3635  @param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
  3636  @param distCoeffs Input vector of distortion coefficients
  3637  \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  3638  of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
  3639  @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as
  3640  cameraMatrix but you may additionally scale and shift the result by using a different matrix.
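
        A minimal usage sketch, assuming cameraMatrix and distCoeffs come from a previous
        #calibrateCamera run (alpha = 1 keeps all source pixels visible):
        @code
            Mat newK = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, src.size(), 1.0);
            Mat dst;
            undistort(src, dst, cameraMatrix, distCoeffs, newK);
        @endcode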
  3641   */
  3642  CV_EXPORTS_W void undistort( InputArray src, OutputArray dst,
  3643                               InputArray cameraMatrix,
  3644                               InputArray distCoeffs,
  3645                               InputArray newCameraMatrix = noArray() );
  3646  
  3647  /** @brief Computes the undistortion and rectification transformation map.
  3648  
  3649  The function computes the joint undistortion and rectification transformation and represents the
  3650  result in the form of maps for #remap. The undistorted image looks like the original, as if it was
  3651  captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a
  3652  monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by
  3653  #getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera,
  3654  newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
  3655  
  3656  Also, this new camera is oriented differently in the coordinate space, according to R. That, for
  3657  example, helps to align two heads of a stereo camera so that the epipolar lines on both images
  3658  become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera).
  3659  
  3660  The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That
  3661  is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function
  3662  computes the corresponding coordinates in the source image (that is, in the original image from
  3663  camera). The following process is applied:
  3664  \f[
  3665  \begin{array}{l}
  3666  x  \leftarrow (u - {c'}_x)/{f'}_x  \\
  3667  y  \leftarrow (v - {c'}_y)/{f'}_y  \\
  3668  {[X\,Y\,W]} ^T  \leftarrow R^{-1}*[x \, y \, 1]^T  \\
  3669  x'  \leftarrow X/W  \\
  3670  y'  \leftarrow Y/W  \\
  3671  r^2  \leftarrow x'^2 + y'^2 \\
  3672  x''  \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
  3673  + 2p_1 x' y' + p_2(r^2 + 2 x'^2)  + s_1 r^2 + s_2 r^4\\
  3674  y''  \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}
  3675  + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\
  3676  s\vecthree{x'''}{y'''}{1} =
  3677  \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
  3678  {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)}
  3679  {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\
  3680  map_x(u,v)  \leftarrow x''' f_x + c_x  \\
  3681  map_y(u,v)  \leftarrow y''' f_y + c_y
  3682  \end{array}
  3683  \f]
  3684  where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  3685  are the distortion coefficients.
  3686  
  3687  In case of a stereo camera, this function is called twice: once for each camera head, after
  3688  #stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera
  3689  was not calibrated, it is still possible to compute the rectification transformations directly from
  3690  the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes
  3691  homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
  3692  space. R can be computed from H as
  3693  \f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f]
  3694  where cameraMatrix can be chosen arbitrarily.
  3695  
  3696  @param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
  3697  @param distCoeffs Input vector of distortion coefficients
  3698  \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  3699  of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
  3700  @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 ,
  3701  computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
  3702  is assumed. In cvInitUndistortMap, R is assumed to be an identity matrix.
  3703  @param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$.
  3704  @param size Undistorted image size.
  3705  @param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
  3706  @param map1 The first output map.
  3707  @param map2 The second output map.
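
        A minimal monocular usage sketch, assuming cameraMatrix and distCoeffs come from
        #calibrateCamera; the maps are built once and reused for every frame:
        @code
            Mat map1, map2;
            initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(), cameraMatrix,
                                    imageSize, CV_16SC2, map1, map2);
            Mat rectified;
            remap(frame, rectified, map1, map2, INTER_LINEAR);
        @endcode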
  3708   */
  3709  CV_EXPORTS_W
  3710  void initUndistortRectifyMap(InputArray cameraMatrix, InputArray distCoeffs,
  3711                               InputArray R, InputArray newCameraMatrix,
  3712                               Size size, int m1type, OutputArray map1, OutputArray map2);
  3713  
  3714  /** @brief Computes the projection and inverse-rectification transformation map. In essence, this is the inverse of
  3715  #initUndistortRectifyMap to accommodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
  3716  
  3717  The function computes the joint projection and inverse rectification transformation and represents the
  3718  result in the form of maps for #remap. The projected image looks like a distorted version of the original which,
  3719  once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix
  3720  is usually equal to cameraMatrix, or it can be computed by
  3721  #getOptimalNewCameraMatrix for a better control over scaling. In case of a projector-camera pair,
  3722  newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify .
  3723  
  3724  The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs,
  3725  this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This
  3726  allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair).
  3727  
  3728  The function builds the maps for the inverse mapping algorithm that is used by #remap. That
  3729  is, for each pixel \f$(u, v)\f$ in the destination (projected and inverse-rectified) image, the function
  3730  computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied:
  3731  
  3732  \f[
  3733  \begin{array}{l}
  3734  \text{newCameraMatrix}\\
  3735  x  \leftarrow (u - {c'}_x)/{f'}_x  \\
  3736  y  \leftarrow (v - {c'}_y)/{f'}_y  \\
  3737  
  3738  \\\text{Undistortion}
  3739  \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\
  3740  r^2  \leftarrow x^2 + y^2 \\
  3741  \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\
  3742  x' \leftarrow \frac{x}{\theta} \\
  3743  y'  \leftarrow \frac{y}{\theta} \\
  3744  
  3745  \\\text{Rectification}\\
  3746  {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
  3747  x''  \leftarrow X/W  \\
  3748  y''  \leftarrow Y/W  \\
  3749  
  3750  \\\text{cameraMatrix}\\
  3751  map_x(u,v)  \leftarrow x'' f_x + c_x  \\
  3752  map_y(u,v)  \leftarrow y'' f_y + c_y
  3753  \end{array}
  3754  \f]
  3755  where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  3756  are the distortion coefficients vector distCoeffs.
  3757  
  3758  In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head.
  3759  This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair
  3760  is not calibrated, it is still possible to compute the rectification transformations directly from
  3761  the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes
  3762  homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D
  3763  space. R can be computed from H as
  3764  \f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f]
  3765  where cameraMatrix can be chosen arbitrarily.
  3766  
  3767  @param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
  3768  @param distCoeffs Input vector of distortion coefficients
  3769  \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  3770  of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
  3771  @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2,
  3772  computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation
  3773  is assumed.
  3774  @param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$.
  3775  @param size Distorted image size.
  3776  @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps
  3777  @param map1 The first output map for #remap.
  3778  @param map2 The second output map for #remap.
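
        A minimal usage sketch (the intrinsics, distortion values and sizes below are
        hypothetical; in a real projector-camera rig, R and newCameraMatrix would normally
        come from #stereoRectify):
        @code{.cpp}
            cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
            cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.2, 0.1, 0, 0, 0);
            cv::Mat map1, map2;
            // Empty R means identity rectification; newCameraMatrix = K for the monocular case.
            cv::initInverseRectificationMap(K, dist, cv::noArray(), K,
                                            cv::Size(640, 480), CV_32FC1, map1, map2);
            // cv::remap(original, projectorInput, map1, map2, cv::INTER_LINEAR);
        @endcode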
  3779   */
  3780  CV_EXPORTS_W
  3781  void initInverseRectificationMap( InputArray cameraMatrix, InputArray distCoeffs,
  3782                             InputArray R, InputArray newCameraMatrix,
  3783                             const Size& size, int m1type, OutputArray map1, OutputArray map2 );
  3784  
  3785  //! initializes maps for #remap for wide-angle
  3786  CV_EXPORTS
  3787  float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs,
  3788                             Size imageSize, int destImageWidth,
  3789                             int m1type, OutputArray map1, OutputArray map2,
  3790                             enum UndistortTypes projType = PROJ_SPHERICAL_EQRECT, double alpha = 0);
  3791  static inline
  3792  float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs,
  3793                             Size imageSize, int destImageWidth,
  3794                             int m1type, OutputArray map1, OutputArray map2,
  3795                             int projType, double alpha = 0)
  3796  {
  3797      return initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth,
  3798                                  m1type, map1, map2, (UndistortTypes)projType, alpha);
  3799  }
  3800  
  3801  /** @brief Returns the default new camera matrix.
  3802  
  3803  The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when
  3804  centerPrincipalPoint=false ), or the modified one (when centerPrincipalPoint=true).
  3805  
  3806  In the latter case, the new camera matrix will be:
  3807  
  3808  \f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5  \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5  \\ 0 && 0 && 1 \end{bmatrix} ,\f]
  3809  
  3810  where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively.
  3811  
  3812  By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not
  3813  move the principal point. However, when you work with stereo, it is important to move the principal
  3814  points in both views to the same y-coordinate (which is required by most stereo correspondence
  3815  algorithms), and possibly to the same x-coordinate too. So, you can form a new camera matrix for
  3816  each view where the principal points are located at the center.
  3817  
  3818  @param cameraMatrix Input camera matrix.
  3819  @param imgsize Camera view image size in pixels.
  3820  @param centerPrincipalPoint Location of the principal point in the new camera matrix. The
  3821  parameter indicates whether this location should be at the image center or not.
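
        For instance, for a hypothetical 640x480 view, centering the principal point:
        @code{.cpp}
            cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 300, 0, 500, 260, 0, 0, 1);
            // Keeps f_x and f_y; the principal point moves to
            // ((640-1)*0.5, (480-1)*0.5) = (319.5, 239.5).
            cv::Mat Kc = cv::getDefaultNewCameraMatrix(K, cv::Size(640, 480), true);
        @endcode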
  3822   */
  3823  CV_EXPORTS_W
  3824  Mat getDefaultNewCameraMatrix(InputArray cameraMatrix, Size imgsize = Size(),
  3825                                bool centerPrincipalPoint = false);
  3826  
  3827  /** @brief Computes the ideal point coordinates from the observed point coordinates.
  3828  
  3829  The function is similar to #undistort and #initUndistortRectifyMap but it operates on a
  3830  sparse set of points instead of a raster image. The function also performs the transformation inverse
  3831  to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a
  3832  planar object it does, up to a translation vector, if the proper R is specified.
  3833  
  3834  For each observed point coordinate \f$(u, v)\f$ the function computes:
  3835  \f[
  3836  \begin{array}{l}
  3837  x^{"}  \leftarrow (u - c_x)/f_x  \\
  3838  y^{"}  \leftarrow (v - c_y)/f_y  \\
  3839  (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\
  3840  {[X\,Y\,W]} ^T  \leftarrow R*[x' \, y' \, 1]^T  \\
  3841  x  \leftarrow X/W  \\
  3842  y  \leftarrow Y/W  \\
  3843  \text{only performed if P is specified:} \\
  3844  u'  \leftarrow x {f'}_x + {c'}_x  \\
  3845  v'  \leftarrow y {f'}_y + {c'}_y
  3846  \end{array}
  3847  \f]
  3848  
  3849  where *undistort* is an approximate iterative algorithm that estimates the normalized original
  3850  point coordinates out of the normalized distorted point coordinates ("normalized" means that the
  3851  coordinates do not depend on the camera matrix).
  3852  
  3853  The function can be used for both a stereo camera head or a monocular camera (when R is empty).
  3854  @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
  3855  vector\<Point2f\> ).
  3856  @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
  3857  transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
  3858  @param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
  3859  @param distCoeffs Input vector of distortion coefficients
  3860  \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$
  3861  of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed.
  3862  @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by
  3863  #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used.
  3864  @param P New camera matrix (3x3) or new projection matrix (3x4) \f$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\f$. P1 or P2 computed by
  3865  #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used.
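
        A minimal sketch for the monocular case (hypothetical intrinsics and distortion):
        @code{.cpp}
            std::vector<cv::Point2f> observed = { {320.f, 240.f}, {100.f, 50.f} }, ideal;
            cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
            cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.25, 0.1, 0, 0, 0);
            // R and P omitted: dst receives normalized coordinates.
            cv::undistortPoints(observed, ideal, K, dist);
            // Passing P = K instead maps the result back to pixel coordinates.
            cv::undistortPoints(observed, ideal, K, dist, cv::noArray(), K);
        @endcode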
  3866   */
  3867  CV_EXPORTS_W
  3868  void undistortPoints(InputArray src, OutputArray dst,
  3869                       InputArray cameraMatrix, InputArray distCoeffs,
  3870                       InputArray R = noArray(), InputArray P = noArray());
  3871  /** @overload
  3872      @note Default version of #undistortPoints does 5 iterations to compute undistorted points.
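
            For strongly distorted inputs, more iterations may help; a sketch (hypothetical
            matrices and point list):
            @code{.cpp}
                std::vector<cv::Point2f> observed = { {40.f, 30.f} }, ideal;
                cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
                cv::Mat dist = (cv::Mat_<double>(1, 4) << -0.3, 0.1, 0, 0);
                cv::undistortPoints(observed, ideal, K, dist, cv::noArray(), cv::noArray(),
                    cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 40, 1e-8));
            @endcode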
  3873   */
  3874  CV_EXPORTS_AS(undistortPointsIter)
  3875  void undistortPoints(InputArray src, OutputArray dst,
  3876                       InputArray cameraMatrix, InputArray distCoeffs,
  3877                       InputArray R, InputArray P, TermCriteria criteria);
  3878  
  3879  //! @} calib3d
  3880  
  3881  /** @brief The methods in this namespace use a so-called fisheye camera model.
  3882    @ingroup calib3d_fisheye
  3883  */
  3884  namespace fisheye
  3885  {
  3886  //! @addtogroup calib3d_fisheye
  3887  //! @{
  3888  
  3889      enum{
  3890          CALIB_USE_INTRINSIC_GUESS   = 1 << 0,
  3891          CALIB_RECOMPUTE_EXTRINSIC   = 1 << 1,
  3892          CALIB_CHECK_COND            = 1 << 2,
  3893          CALIB_FIX_SKEW              = 1 << 3,
  3894          CALIB_FIX_K1                = 1 << 4,
  3895          CALIB_FIX_K2                = 1 << 5,
  3896          CALIB_FIX_K3                = 1 << 6,
  3897          CALIB_FIX_K4                = 1 << 7,
  3898          CALIB_FIX_INTRINSIC         = 1 << 8,
  3899          CALIB_FIX_PRINCIPAL_POINT   = 1 << 9,
  3900          CALIB_ZERO_DISPARITY        = 1 << 10,
  3901          CALIB_FIX_FOCAL_LENGTH      = 1 << 11
  3902      };
  3903  
  3904      /** @brief Projects points using fisheye model
  3905  
  3906      @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is
  3907      the number of points in the view.
  3908      @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
  3909      vector\<Point2f\>.
  3910      @param affine Combined rotation-translation (object-to-camera) transformation, equivalent
  3911      to the rvec/tvec pair of the overload below.
  3911      @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
  3912      @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  3913      @param alpha The skew coefficient.
  3914      @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect
  3915      to components of the focal lengths, coordinates of the principal point, distortion coefficients,
  3916      rotation vector, translation vector, and the skew. In the old interface different components of
  3917      the jacobian are returned via different output parameters.
  3918  
  3919      The function computes projections of 3D points to the image plane given intrinsic and extrinsic
  3920      camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of
  3921      image points coordinates (as functions of all the input parameters) with respect to the particular
  3922      parameters, intrinsic and/or extrinsic.
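
            A minimal sketch using the rvec/tvec overload (the values below are hypothetical):
            @code{.cpp}
                std::vector<cv::Point3f> objectPoints = { {0.f, 0.f, 1.f}, {0.1f, -0.1f, 1.2f} };
                std::vector<cv::Point2f> imagePoints;
                cv::Mat K = (cv::Mat_<double>(3, 3) << 400, 0, 320, 0, 400, 240, 0, 0, 1);
                cv::Mat D = (cv::Mat_<double>(1, 4) << 0.05, 0.01, 0.0, 0.0);
                cv::Mat rvec = cv::Mat::zeros(3, 1, CV_64F), tvec = cv::Mat::zeros(3, 1, CV_64F);
                cv::fisheye::projectPoints(objectPoints, imagePoints, rvec, tvec, K, D);
            @endcode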
  3923       */
  3924      CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine,
  3925          InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
  3926  
  3927      /** @overload */
  3928      CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec,
  3929          InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray());
  3930  
  3931      /** @brief Distorts 2D points using fisheye model.
  3932  
  3933      @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is
  3934      the number of points in the view.
  3935      @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
  3936      @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  3937      @param alpha The skew coefficient.
  3938      @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
  3939  
  3940      Note that the function assumes the camera intrinsic matrix of the undistorted points to be identity.
  3941      This means that if you want to distort back points that were undistorted with #fisheye::undistortPoints
  3942      using some new camera matrix P, you first have to multiply them by \f$P^{-1}\f$.
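
            A sketch of that round trip (hypothetical K and D; no P is used, so
            #fisheye::undistortPoints already returns normalized coordinates):
            @code{.cpp}
                std::vector<cv::Point2f> pixels = { {300.f, 200.f} }, normalized, redistorted;
                cv::Mat K = (cv::Mat_<double>(3, 3) << 400, 0, 320, 0, 400, 240, 0, 0, 1);
                cv::Mat D = (cv::Mat_<double>(1, 4) << 0.05, 0.01, 0, 0);
                cv::fisheye::undistortPoints(pixels, normalized, K, D);
                // redistorted should be approximately equal to pixels again.
                cv::fisheye::distortPoints(normalized, redistorted, K, D);
            @endcode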
  3943       */
  3944      CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0);
  3945  
  3946      /** @brief Undistorts 2D points using fisheye model
  3947  
  3948      @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the
  3949      number of points in the view.
  3950      @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
  3951      @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  3952      @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
  3953      1-channel or 1x1 3-channel
  3954      @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
  3955      @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> .
  3956       */
  3957      CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted,
  3958          InputArray K, InputArray D, InputArray R = noArray(), InputArray P  = noArray());
  3959  
  3960      /** @brief Computes undistortion and rectification maps for the image transform by #remap. If D is empty,
  3961      zero distortion is used; if R or P is empty, identity matrices are used.
  3962  
  3963      @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
  3964      @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  3965      @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
  3966      1-channel or 1x1 3-channel
  3967      @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
  3968      @param size Undistorted image size.
  3969      @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps
  3970      for details.
  3971      @param map1 The first output map.
  3972      @param map2 The second output map.
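
            A minimal sketch (hypothetical K and D; P = K keeps the original image scale):
            @code{.cpp}
                cv::Mat K = (cv::Mat_<double>(3, 3) << 400, 0, 320, 0, 400, 240, 0, 0, 1);
                cv::Mat D = (cv::Mat_<double>(1, 4) << 0.05, 0.01, 0, 0);
                cv::Mat map1, map2;
                cv::fisheye::initUndistortRectifyMap(K, D, cv::Mat::eye(3, 3, CV_64F), K,
                    cv::Size(640, 480), CV_16SC2, map1, map2);
                // cv::remap(distorted, undistorted, map1, map2, cv::INTER_LINEAR);
            @endcode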
  3973       */
  3974      CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P,
  3975          const cv::Size& size, int m1type, OutputArray map1, OutputArray map2);
  3976  
  3977      /** @brief Transforms an image to compensate for fisheye lens distortion.
  3978  
  3979      @param distorted image with fisheye lens distortion.
  3980      @param undistorted Output image with compensated fisheye lens distortion.
  3981      @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
  3982      @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  3983      @param Knew Camera intrinsic matrix of the distorted image. By default, it is the identity matrix but you
  3984      may additionally scale and shift the result by using a different matrix.
  3985      @param new_size the new size
  3986  
  3987      The function transforms an image to compensate for radial and tangential lens distortion.
  3988  
  3989      The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap
  3990      (with bilinear interpolation). See the former function for details of the transformation being
  3991      performed.
  3992  
  3993      See below the results of undistortImage.
  3994          -   a\) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3,
  3995              k_4, k_5, k_6) of distortion were optimized under calibration)
  3996          -   b\) result of #fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2,
  3997              k_3, k_4) of fisheye distortion were optimized under calibration)
  3998          -   c\) original image was captured with fisheye lens
  3999  
  4000      Pictures a) and b) are almost the same. But if we consider points located far from the center of
  4001      the image, we can notice that on image a) these points are distorted.
  4002  
  4003      ![image](pics/fisheye_undistorted.jpg)
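
            A minimal usage sketch (hypothetical file name and intrinsics; Knew = K keeps the
            original scale; cv::imread requires opencv2/imgcodecs.hpp):
            @code{.cpp}
                cv::Mat img = cv::imread("fisheye_input.jpg");  // hypothetical input image
                cv::Mat K = (cv::Mat_<double>(3, 3) << 350, 0, 640, 0, 350, 360, 0, 0, 1);
                cv::Mat D = (cv::Mat_<double>(1, 4) << 0.02, -0.01, 0.003, -0.001);
                cv::Mat out;
                cv::fisheye::undistortImage(img, out, K, D, K);
            @endcode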
  4004       */
  4005      CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted,
  4006          InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size());
  4007  
  4008      /** @brief Estimates new camera intrinsic matrix for undistortion or rectification.
  4009  
  4010      @param K Camera intrinsic matrix \f$cameramatrix{K}\f$.
  4011      @param image_size Size of the image
  4012      @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  4013      @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
  4014      1-channel or 1x1 3-channel
  4015      @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4)
  4016      @param balance Sets the new focal length in range between the min focal length and the max focal
  4017      length. Balance is in range of [0, 1].
  4018      @param new_size the new size
  4019      @param fov_scale Divisor for new focal length.
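
            A sketch of feeding the estimated matrix into #fisheye::undistortImage (hypothetical
            K and D; balance = 0.5 trades off cropped versus stretched borders):
            @code{.cpp}
                cv::Mat K = (cv::Mat_<double>(3, 3) << 350, 0, 640, 0, 350, 360, 0, 0, 1);
                cv::Mat D = (cv::Mat_<double>(1, 4) << 0.02, -0.01, 0.003, -0.001);
                cv::Mat Knew;
                cv::fisheye::estimateNewCameraMatrixForUndistortRectify(K, D, cv::Size(1280, 720),
                    cv::Mat::eye(3, 3, CV_64F), Knew, 0.5);
                // cv::fisheye::undistortImage(img, out, K, D, Knew);
            @endcode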
  4020       */
  4021      CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R,
  4022          OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0);
  4023  
  4024      /** @brief Performs camera calibration
  4025  
  4026      @param objectPoints vector of vectors of calibration pattern points in the calibration pattern
  4027      coordinate space.
  4028      @param imagePoints vector of vectors of the projections of calibration pattern points.
  4029      imagePoints.size() must be equal to objectPoints.size(), and imagePoints[i].size() must be
  4030      equal to objectPoints[i].size() for each i.
  4031      @param image_size Size of the image used only to initialize the camera intrinsic matrix.
  4032      @param K Output 3x3 floating-point camera intrinsic matrix
  4033      \f$\cameramatrix{A}\f$ . If
  4034      @ref fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
  4035      initialized before calling the function.
  4036      @param D Output vector of distortion coefficients \f$\distcoeffsfisheye\f$.
  4037      @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
  4038      That is, each k-th rotation vector together with the corresponding k-th translation vector (see
  4039      the next output parameter description) brings the calibration pattern from the model coordinate
  4040      space (in which object points are specified) to the world coordinate space, that is, a real
  4041      position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).
  4042      @param tvecs Output vector of translation vectors estimated for each pattern view.
  4043      @param flags Different flags that may be zero or a combination of the following values:
  4044      -    @ref fisheye::CALIB_USE_INTRINSIC_GUESS  K contains valid initial values of
  4045      fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
  4046      center ( image_size is used), and focal distances are computed in a least-squares fashion.
  4047      -    @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
  4048      of intrinsic optimization.
  4049      -    @ref fisheye::CALIB_CHECK_COND  The function will check the validity of the condition number.
  4050      -    @ref fisheye::CALIB_FIX_SKEW  The skew coefficient (alpha) is set to zero and stays zero.
  4051      -    @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients
  4052      are set to zero and stay zero.
  4053      -    @ref fisheye::CALIB_FIX_PRINCIPAL_POINT  The principal point is not changed during the global
  4054      optimization. It stays at the center or at a different location specified when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
  4055      -    @ref fisheye::CALIB_FIX_FOCAL_LENGTH  The focal length is not changed during the global
  4056      optimization. It is set to \f$max(width,height)/\pi\f$ or to the provided \f$f_x\f$, \f$f_y\f$ when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
  4057      @param criteria Termination criteria for the iterative optimization algorithm.
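
            A minimal calibration sketch (pattern detection omitted; the image size and flags
            below are illustrative only):
            @code{.cpp}
                std::vector<std::vector<cv::Point3f> > objectPoints; // pattern points, one vector per view
                std::vector<std::vector<cv::Point2f> > imagePoints;  // detected projections per view
                // ... fill objectPoints / imagePoints, e.g. from cv::findChessboardCorners ...
                cv::Mat K, D;
                std::vector<cv::Mat> rvecs, tvecs;
                double rms = cv::fisheye::calibrate(objectPoints, imagePoints, cv::Size(1280, 720),
                    K, D, rvecs, tvecs,
                    cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW);
            @endcode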
  4058       */
  4059      CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size,
  4060          InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0,
  4061              TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
  4062  
  4063      /** @brief Stereo rectification for fisheye camera model
  4064  
  4065      @param K1 First camera intrinsic matrix.
  4066      @param D1 First camera distortion parameters.
  4067      @param K2 Second camera intrinsic matrix.
  4068      @param D2 Second camera distortion parameters.
  4069      @param imageSize Size of the image used for stereo calibration.
  4070      @param R Rotation matrix between the coordinate systems of the first and the second
  4071      cameras.
  4072      @param tvec Translation vector between coordinate systems of the cameras.
  4073      @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera.
  4074      @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera.
  4075      @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
  4076      camera.
  4077      @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
  4078      camera.
  4079      @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
  4080      @param flags Operation flags that may be zero or @ref fisheye::CALIB_ZERO_DISPARITY . If the flag is set,
  4081      the function makes the principal points of each camera have the same pixel coordinates in the
  4082      rectified views. And if the flag is not set, the function may still shift the images in the
  4083      horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
  4084      useful image area.
  4085      @param newImageSize New image resolution after rectification. The same size should be passed to
  4086      #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
  4087      is passed (default), it is set to the original imageSize . Setting it to larger value can help you
  4088      preserve details in the original image, especially when there is a big radial distortion.
  4089      @param balance Sets the new focal length in range between the min focal length and the max focal
  4090      length. Balance is in range of [0, 1].
  4091      @param fov_scale Divisor for new focal length.
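
            A minimal sketch, assuming K1, D1, K2, D2, R and tvec were produced by
            #fisheye::stereoCalibrate:
            @code{.cpp}
                cv::Mat R1, R2, P1, P2, Q;
                cv::fisheye::stereoRectify(K1, D1, K2, D2, cv::Size(1280, 720), R, tvec,
                    R1, R2, P1, P2, Q, cv::fisheye::CALIB_ZERO_DISPARITY,
                    cv::Size(), 0.0 /* balance */, 1.1 /* fov_scale */);
            @endcode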
  4092       */
  4093      CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec,
  4094          OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(),
  4095          double balance = 0.0, double fov_scale = 1.0);
  4096  
  4097      /** @brief Performs stereo calibration
  4098  
  4099      @param objectPoints Vector of vectors of the calibration pattern points.
  4100      @param imagePoints1 Vector of vectors of the projections of the calibration pattern points,
  4101      observed by the first camera.
  4102      @param imagePoints2 Vector of vectors of the projections of the calibration pattern points,
  4103      observed by the second camera.
  4104      @param K1 Input/output first camera intrinsic matrix:
  4105      \f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
  4106      any of @ref fisheye::CALIB_USE_INTRINSIC_GUESS , @ref fisheye::CALIB_FIX_INTRINSIC are specified,
  4107      some or all of the matrix components must be initialized.
  4108      @param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements.
  4109      @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 .
  4110      @param D2 Input/output lens distortion coefficients for the second camera. The parameter is
  4111      similar to D1 .
  4112      @param imageSize Size of the image used only to initialize camera intrinsic matrix.
  4113      @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems.
  4114      @param T Output translation vector between the coordinate systems of the cameras.
  4115      @param flags Different flags that may be zero or a combination of the following values:
  4116      -    @ref fisheye::CALIB_FIX_INTRINSIC  Fix K1, K2 and D1, D2 so that only the R and T matrices
  4117      are estimated.
  4118      -    @ref fisheye::CALIB_USE_INTRINSIC_GUESS  K1 and K2 contain valid initial values of
  4119      fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
  4120      center (imageSize is used), and focal distances are computed in a least-squares fashion.
  4121      -    @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC  Extrinsics will be recomputed after each iteration
  4122      of intrinsic optimization.
  4123      -    @ref fisheye::CALIB_CHECK_COND  The function will check the validity of the condition number.
  4124      -    @ref fisheye::CALIB_FIX_SKEW  The skew coefficient (alpha) is set to zero and stays zero.
  4125      -    @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero
  4126      and stay zero.
  4127      @param criteria Termination criteria for the iterative optimization algorithm.
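
            A minimal sketch (pattern detection omitted; with these flags the intrinsics are
            estimated from scratch rather than fixed):
            @code{.cpp}
                std::vector<std::vector<cv::Point3f> > objectPoints;
                std::vector<std::vector<cv::Point2f> > imagePoints1, imagePoints2;
                // ... fill from calibration patterns detected in both cameras ...
                cv::Mat K1, D1, K2, D2, R, T;
                double rms = cv::fisheye::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                    K1, D1, K2, D2, cv::Size(1280, 720), R, T,
                    cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC | cv::fisheye::CALIB_FIX_SKEW);
            @endcode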
  4128       */
  4129      CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
  4130                                    InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize,
  4131                                    OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC,
  4132                                    TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON));
  4133  
  4134  //! @} calib3d_fisheye
  4135  } // end namespace fisheye
  4136  
  4137  } //end namespace cv
  4138  
  4139  #if 0 //def __cplusplus
  4140  //////////////////////////////////////////////////////////////////////////////////////////
  4141  class CV_EXPORTS CvLevMarq
  4142  {
  4143  public:
  4144      CvLevMarq();
  4145      CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria=
  4146                cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
  4147                bool completeSymmFlag=false );
  4148      ~CvLevMarq();
  4149      void init( int nparams, int nerrs, CvTermCriteria criteria=
  4150                cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
  4151                bool completeSymmFlag=false );
  4152      bool update( const CvMat*& param, CvMat*& J, CvMat*& err );
  4153      bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm );
  4154  
  4155      void clear();
  4156      void step();
  4157      enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 };
  4158  
  4159      cv::Ptr<CvMat> mask;
  4160      cv::Ptr<CvMat> prevParam;
  4161      cv::Ptr<CvMat> param;
  4162      cv::Ptr<CvMat> J;
  4163      cv::Ptr<CvMat> err;
  4164      cv::Ptr<CvMat> JtJ;
  4165      cv::Ptr<CvMat> JtJN;
  4166      cv::Ptr<CvMat> JtErr;
  4167      cv::Ptr<CvMat> JtJV;
  4168      cv::Ptr<CvMat> JtJW;
  4169      double prevErrNorm, errNorm;
  4170      int lambdaLg10;
  4171      CvTermCriteria criteria;
  4172      int state;
  4173      int iters;
  4174      bool completeSymmFlag;
  4175      int solveMethod;
  4176  };
  4177  #endif
  4178  
  4179  #endif