github.com/kaydxh/golang@v0.0.131/pkg/gocv/cgo/third_path/opencv4/include/opencv2/gapi/imgproc.hpp (about)

     1  // This file is part of OpenCV project.
     2  // It is subject to the license terms in the LICENSE file found in the top-level directory
     3  // of this distribution and at http://opencv.org/license.html.
     4  //
     5  // Copyright (C) 2018-2020 Intel Corporation
     6  
     7  
     8  #ifndef OPENCV_GAPI_IMGPROC_HPP
     9  #define OPENCV_GAPI_IMGPROC_HPP
    10  
    11  #include <opencv2/imgproc.hpp>
    12  
    13  #include <utility> // std::tuple
    14  
    15  #include <opencv2/gapi/gkernel.hpp>
    16  #include <opencv2/gapi/gmat.hpp>
    17  #include <opencv2/gapi/gscalar.hpp>
    18  
    19  
    20  /** \defgroup gapi_imgproc G-API Image processing functionality
    21  @{
    22      @defgroup gapi_filters Graph API: Image filters
    23      @defgroup gapi_colorconvert Graph API: Converting image from one color space to another
    24      @defgroup gapi_feature Graph API: Image Feature Detection
    25      @defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors
    26  @}
    27   */
    28  
    29  namespace {
        // Validates input-Mat metadata shared by the FindContours kernel family:
        // the input must be single-channel, and the allowed depth depends on the
        // retrieval mode (mirrors cv::findContours requirements) — CV_32S is
        // accepted only for RETR_CCOMP / RETR_FLOODFILL; every other mode
        // requires CV_8U. Fails via GAPI_Assert on violation.
        // NOTE(review): an anonymous namespace in a header gives every including
        // TU its own copy of this helper; presumably deliberate to keep it out
        // of the public API — confirm against upstream before changing.
    30  void validateFindingContoursMeta(const int depth, const int chan, const int mode)
    31  {
    32      GAPI_Assert(chan == 1);
    33      switch (mode)
    34      {
    35      case cv::RETR_CCOMP:
    36          GAPI_Assert(depth == CV_8U || depth == CV_32S);
    37          break;
    38      case cv::RETR_FLOODFILL:
    39          GAPI_Assert(depth == CV_32S);
    40          break;
    41      default:
    42          GAPI_Assert(depth == CV_8U);
    43          break;
    44      }
    45  }
    46  } // anonymous namespace
    47  
    48  namespace cv { namespace gapi {
    49  
    50  /**
    51   * @brief This namespace contains G-API Operation Types for OpenCV
    52   * ImgProc module functionality.
    53   */
    54  namespace imgproc {
    55      using GMat2 = std::tuple<GMat,GMat>;
    56      using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
        // Combined output of the *H (hierarchy) FindContours kernels:
        // the list of contours plus the per-contour hierarchy vector.
    57      using GFindContoursOutput = std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>;
    58  
    59      G_TYPED_KERNEL(GFilter2D, <GMat(GMat,int,Mat,Point,Scalar,int,Scalar)>,"org.opencv.imgproc.filters.filter2D") {
        // outMeta: output keeps input size/channels, only depth changes to ddepth.
    60          static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) {
    61              return in.withDepth(ddepth);
    62          }
    63      };
    64  
    65      G_TYPED_KERNEL(GSepFilter, <GMat(GMat,int,Mat,Mat,Point,Scalar,int,Scalar)>, "org.opencv.imgproc.filters.sepfilter") {
        // outMeta: separable filter — same size/channels, depth set to ddepth.
    66          static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Mat, Point, Scalar, int, Scalar) {
    67              return in.withDepth(ddepth);
    68          }
    69      };
    70  
    71      G_TYPED_KERNEL(GBoxFilter, <GMat(GMat,int,Size,Point,bool,int,Scalar)>, "org.opencv.imgproc.filters.boxfilter") {
        // outMeta: box filter — same size/channels, depth set to ddepth.
    72          static GMatDesc outMeta(GMatDesc in, int ddepth, Size, Point, bool, int, Scalar) {
    73              return in.withDepth(ddepth);
    74          }
    75      };
    76  
        // Smoothing and morphology kernels below preserve the input meta
        // entirely (same depth, size and channel count).
    77      G_TYPED_KERNEL(GBlur, <GMat(GMat,Size,Point,int,Scalar)>,         "org.opencv.imgproc.filters.blur"){
    78          static GMatDesc outMeta(GMatDesc in, Size, Point, int, Scalar) {
    79              return in;
    80          }
    81      };
    82  
    83      G_TYPED_KERNEL(GGaussBlur, <GMat(GMat,Size,double,double,int,Scalar)>, "org.opencv.imgproc.filters.gaussianBlur") {
    84          static GMatDesc outMeta(GMatDesc in, Size, double, double, int, Scalar) {
    85              return in;
    86          }
    87      };
    88  
    89      G_TYPED_KERNEL(GMedianBlur, <GMat(GMat,int)>, "org.opencv.imgproc.filters.medianBlur") {
    90          static GMatDesc outMeta(GMatDesc in, int) {
    91              return in;
    92          }
    93      };
    94  
    95      G_TYPED_KERNEL(GErode, <GMat(GMat,Mat,Point,int,int,Scalar)>, "org.opencv.imgproc.filters.erode") {
    96          static GMatDesc outMeta(GMatDesc in, Mat, Point, int, int, Scalar) {
    97              return in;
    98          }
    99      };
   100  
   101      G_TYPED_KERNEL(GDilate, <GMat(GMat,Mat,Point,int,int,Scalar)>, "org.opencv.imgproc.filters.dilate") {
   102          static GMatDesc outMeta(GMatDesc in, Mat, Point, int, int, Scalar) {
   103              return in;
   104          }
   105      };
   106  
        // morphologyEx preserves the input meta unchanged.
   107      G_TYPED_KERNEL(GMorphologyEx, <GMat(GMat,MorphTypes,Mat,Point,int,BorderTypes,Scalar)>,
   108                     "org.opencv.imgproc.filters.morphologyEx") {
   109          static GMatDesc outMeta(const GMatDesc &in, MorphTypes, Mat, Point, int,
   110                                  BorderTypes, Scalar) {
   111              return in;
   112          }
   113      };
   114  
        // Derivative kernels: same size/channels, output depth set to ddepth.
   115      G_TYPED_KERNEL(GSobel, <GMat(GMat,int,int,int,int,double,double,int,Scalar)>, "org.opencv.imgproc.filters.sobel") {
   116          static GMatDesc outMeta(GMatDesc in, int ddepth, int, int, int, double, double, int, Scalar) {
   117              return in.withDepth(ddepth);
   118          }
   119      };
   120  
        // Two-output variant: dx and dy derivatives share one meta.
   121      G_TYPED_KERNEL_M(GSobelXY, <GMat2(GMat,int,int,int,double,double,int,Scalar)>, "org.opencv.imgproc.filters.sobelxy") {
   122          static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc in, int ddepth, int, int, double, double, int, Scalar) {
   123              return std::make_tuple(in.withDepth(ddepth), in.withDepth(ddepth));
   124          }
   125      };
   126  
   127      G_TYPED_KERNEL(GLaplacian, <GMat(GMat,int, int, double, double, int)>,
   128                     "org.opencv.imgproc.filters.laplacian") {
   129          static GMatDesc outMeta(GMatDesc in, int ddepth, int, double, double, int) {
   130              return in.withDepth(ddepth);
   131          }
   132      };
   133  
        // Bilateral filter preserves the input meta unchanged.
   134      G_TYPED_KERNEL(GBilateralFilter, <GMat(GMat,int, double, double, int)>,
   135                     "org.opencv.imgproc.filters.bilateralfilter") {
   136          static GMatDesc outMeta(GMatDesc in, int, double, double, int) {
   137              return in;
   138          }
   139      };
   140  
        // Histogram equalization: output is forced to single-channel 8-bit.
   141      G_TYPED_KERNEL(GEqHist, <GMat(GMat)>, "org.opencv.imgproc.equalizeHist"){
   142          static GMatDesc outMeta(GMatDesc in) {
   143              return in.withType(CV_8U, 1);
   144          }
   145      };
   146  
        // Canny edge map: single-channel 8-bit output, same size as input.
   147      G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.feature.canny"){
   148          static GMatDesc outMeta(GMatDesc in, double, double, int, bool) {
   149              return in.withType(CV_8U, 1);
   150          }
   151      };
   152  
        // goodFeaturesToTrack: corner list length is data-dependent, so the
        // output array meta is necessarily empty at graph-compile time.
   153      G_TYPED_KERNEL(GGoodFeatures,
   154                     <cv::GArray<cv::Point2f>(GMat,int,double,double,Mat,int,bool,double)>,
   155                     "org.opencv.imgproc.feature.goodFeaturesToTrack") {
   156          static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) {
   157              return empty_array_desc();
   158          }
   159      };
   160  
   161      using RetrMode = RetrievalModes;
   162      using ContMethod = ContourApproximationModes;
        // FindContours family: all variants validate the input meta via
        // validateFindingContoursMeta (single channel; depth constrained by
        // retrieval mode). Contour counts are data-dependent, so array descs
        // are always empty. *NoOffset variants drop the GOpaque<Point> offset
        // input; *H variants additionally return the hierarchy array.
   163      G_TYPED_KERNEL(GFindContours, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
   164                     "org.opencv.imgproc.shape.findContours")
   165      {
   166          static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
   167          {
   168              validateFindingContoursMeta(in.depth, in.chan, mode);
   169              return empty_array_desc();
   170          }
   171      };
   172  
   173      // FIXME oc: make default value offset = Point()
   174      G_TYPED_KERNEL(GFindContoursNoOffset, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod)>,
   175                     "org.opencv.imgproc.shape.findContoursNoOffset")
   176      {
   177          static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod)
   178          {
   179              validateFindingContoursMeta(in.depth, in.chan, mode);
   180              return empty_array_desc();
   181          }
   182      };
   183  
   184      G_TYPED_KERNEL(GFindContoursH,<GFindContoursOutput(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
   185                     "org.opencv.imgproc.shape.findContoursH")
   186      {
   187          static std::tuple<GArrayDesc,GArrayDesc>
   188          outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
   189          {
   190              validateFindingContoursMeta(in.depth, in.chan, mode);
   191              return std::make_tuple(empty_array_desc(), empty_array_desc());
   192          }
   193      };
   194  
   195      // FIXME oc: make default value offset = Point()
   196      G_TYPED_KERNEL(GFindContoursHNoOffset,<GFindContoursOutput(GMat,RetrMode,ContMethod)>,
   197                     "org.opencv.imgproc.shape.findContoursHNoOffset")
   198      {
   199          static std::tuple<GArrayDesc,GArrayDesc>
   200          outMeta(GMatDesc in, RetrMode mode, ContMethod)
   201          {
   202              validateFindingContoursMeta(in.depth, in.chan, mode);
   203              return std::make_tuple(empty_array_desc(), empty_array_desc());
   204          }
   205      };
   206  
        // boundingRect over a Mat: a CV_8U input is treated as a mask (must be
        // single-channel); otherwise the Mat must be CV_32S/CV_32F and
        // describable as a vector of 2D points (detail::checkVector).
   207      G_TYPED_KERNEL(GBoundingRectMat, <GOpaque<Rect>(GMat)>,
   208                     "org.opencv.imgproc.shape.boundingRectMat") {
   209          static GOpaqueDesc outMeta(GMatDesc in) {
   210              if (in.depth == CV_8U)
   211              {
   212                  GAPI_Assert(in.chan == 1);
   213              }
   214              else
   215              {
   216                  GAPI_Assert (in.depth == CV_32S || in.depth == CV_32F);
   217                  int amount = detail::checkVector(in, 2u);
   218                  GAPI_Assert(amount != -1 &&
   219                              "Input Mat can't be described as vector of 2-dimentional points");
   220              }
   221              return empty_gopaque_desc();
   222          }
   223      };
   224  
        // Point-vector overloads: no meta to validate, opaque desc is empty.
   225      G_TYPED_KERNEL(GBoundingRectVector32S, <GOpaque<Rect>(GArray<Point2i>)>,
   226                     "org.opencv.imgproc.shape.boundingRectVector32S") {
   227          static GOpaqueDesc outMeta(GArrayDesc) {
   228              return empty_gopaque_desc();
   229          }
   230      };
   231  
   232      G_TYPED_KERNEL(GBoundingRectVector32F, <GOpaque<Rect>(GArray<Point2f>)>,
   233                     "org.opencv.imgproc.shape.boundingRectVector32F") {
   234          static GOpaqueDesc outMeta(GArrayDesc) {
   235              return empty_gopaque_desc();
   236          }
   237      };
   238  
        // fitLine (2D): the Mat overload validates that the input can be seen
        // as a vector of 2D points; the typed-vector overloads need no checks.
        // The fitted Vec4f line is data-dependent, hence empty opaque descs.
   239      G_TYPED_KERNEL(GFitLine2DMat, <GOpaque<Vec4f>(GMat,DistanceTypes,double,double,double)>,
   240                     "org.opencv.imgproc.shape.fitLine2DMat") {
   241          static GOpaqueDesc outMeta(GMatDesc in,DistanceTypes,double,double,double) {
   242              int amount = detail::checkVector(in, 2u);
   243              GAPI_Assert(amount != -1 &&
   244                          "Input Mat can't be described as vector of 2-dimentional points");
   245              return empty_gopaque_desc();
   246          }
   247      };
   248  
   249      G_TYPED_KERNEL(GFitLine2DVector32S,
   250                     <GOpaque<Vec4f>(GArray<Point2i>,DistanceTypes,double,double,double)>,
   251                     "org.opencv.imgproc.shape.fitLine2DVector32S") {
   252          static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
   253              return empty_gopaque_desc();
   254          }
   255      };
   256  
   257      G_TYPED_KERNEL(GFitLine2DVector32F,
   258                     <GOpaque<Vec4f>(GArray<Point2f>,DistanceTypes,double,double,double)>,
   259                     "org.opencv.imgproc.shape.fitLine2DVector32F") {
   260          static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
   261              return empty_gopaque_desc();
   262          }
   263      };
   264  
   265      G_TYPED_KERNEL(GFitLine2DVector64F,
   266                     <GOpaque<Vec4f>(GArray<Point2d>,DistanceTypes,double,double,double)>,
   267                     "org.opencv.imgproc.shape.fitLine2DVector64F") {
   268          static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
   269              return empty_gopaque_desc();
   270          }
   271      };
   272  
   273      G_TYPED_KERNEL(GFitLine3DMat, <GOpaque<Vec6f>(GMat,DistanceTypes,double,double,double)>,
   274                     "org.opencv.imgproc.shape.fitLine3DMat") {
   275          static GOpaqueDesc outMeta(GMatDesc in,int,double,double,double) {
   276              int amount = detail::checkVector(in, 3u);
   277              GAPI_Assert(amount != -1 &&
   278                          "Input Mat can't be described as vector of 3-dimentional points");
   279              return empty_gopaque_desc();
   280          }
   281      };
   282  
        // fitLine (3D) typed-vector overloads: nothing to validate at
        // compile time; the Vec6f result is data-dependent.
   283      G_TYPED_KERNEL(GFitLine3DVector32S,
   284                     <GOpaque<Vec6f>(GArray<Point3i>,DistanceTypes,double,double,double)>,
   285                     "org.opencv.imgproc.shape.fitLine3DVector32S") {
   286          static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
   287              return empty_gopaque_desc();
   288          }
   289      };
   290  
   291      G_TYPED_KERNEL(GFitLine3DVector32F,
   292                     <GOpaque<Vec6f>(GArray<Point3f>,DistanceTypes,double,double,double)>,
   293                     "org.opencv.imgproc.shape.fitLine3DVector32F") {
   294          static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
   295              return empty_gopaque_desc();
   296          }
   297      };
   298  
   299      G_TYPED_KERNEL(GFitLine3DVector64F,
   300                     <GOpaque<Vec6f>(GArray<Point3d>,DistanceTypes,double,double,double)>,
   301                     "org.opencv.imgproc.shape.fitLine3DVector64F") {
   302          static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) {
   303              return empty_gopaque_desc();
   304          }
   305      };
   306  
        // 3-channel <-> 3-channel color conversions: meta passes through.
   307      G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
   308          static GMatDesc outMeta(GMatDesc in) {
   309              return in; // type still remains CV_8UC3;
   310          }
   311      };
   312  
   313      G_TYPED_KERNEL(GRGB2YUV, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2yuv") {
   314          static GMatDesc outMeta(GMatDesc in) {
   315              return in; // type still remains CV_8UC3;
   316          }
   317      };
   318  
   319      G_TYPED_KERNEL(GYUV2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.yuv2rgb") {
   320          static GMatDesc outMeta(GMatDesc in) {
   321              return in; // type still remains CV_8UC3;
   322          }
   323      };
   324  
        // I420 packers: 8UC3 HxW input becomes a single-channel (H*3/2)xW
        // plane (Y plane followed by subsampled U and V); height must be even.
   325      G_TYPED_KERNEL(GBGR2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2i420") {
   326          static GMatDesc outMeta(GMatDesc in) {
   327              GAPI_Assert(in.depth == CV_8U);
   328              GAPI_Assert(in.chan == 3);
   329              GAPI_Assert(in.size.height % 2 == 0);
   330              return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
   331          }
   332      };
   333  
   334      G_TYPED_KERNEL(GRGB2I420, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2i420") {
   335          static GMatDesc outMeta(GMatDesc in) {
   336              GAPI_Assert(in.depth == CV_8U);
   337              GAPI_Assert(in.chan == 3);
   338              GAPI_Assert(in.size.height % 2 == 0);
   339              return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2));
   340          }
   341      };
   342  
        // I420 unpackers: the inverse transform — single-channel (H*3/2)xW
        // input becomes an 8UC3 HxW image; input height must divide by 3.
   343      G_TYPED_KERNEL(GI4202BGR, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202bgr") {
   344          static GMatDesc outMeta(GMatDesc in) {
   345              GAPI_Assert(in.depth == CV_8U);
   346              GAPI_Assert(in.chan == 1);
   347              GAPI_Assert(in.size.height % 3 == 0);
   348              return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
   349          }
   350      };
   351  
   352      G_TYPED_KERNEL(GI4202RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.i4202rgb") {
   353          static GMatDesc outMeta(GMatDesc in) {
   354              GAPI_Assert(in.depth == CV_8U);
   355              GAPI_Assert(in.chan == 1);
   356              GAPI_Assert(in.size.height % 3 == 0);
   357              return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3));
   358          }
   359      };
   360  
        // NV12 -> interleaved RGB/BGR: takes a full-resolution 8U Y plane and
        // a half-resolution 2-channel UV plane; output is 8UC3 at Y size.
   361      G_TYPED_KERNEL(GNV12toRGB, <GMat(GMat, GMat)>, "org.opencv.imgproc.colorconvert.nv12torgb") {
   362          static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) {
   363              GAPI_Assert(in_y.chan == 1);
   364              GAPI_Assert(in_uv.chan == 2);
   365              GAPI_Assert(in_y.depth == CV_8U);
   366              GAPI_Assert(in_uv.depth == CV_8U);
   367              // UV size should be aligned with Y
   368              GAPI_Assert(in_y.size.width == 2 * in_uv.size.width);
   369              GAPI_Assert(in_y.size.height == 2 * in_uv.size.height);
   370              return in_y.withType(CV_8U, 3); // type will be CV_8UC3;
   371          }
   372      };
   373  
   374      G_TYPED_KERNEL(GNV12toBGR, <GMat(GMat, GMat)>, "org.opencv.imgproc.colorconvert.nv12tobgr") {
   375          static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) {
   376              GAPI_Assert(in_y.chan == 1);
   377              GAPI_Assert(in_uv.chan == 2);
   378              GAPI_Assert(in_y.depth == CV_8U);
   379              GAPI_Assert(in_uv.depth == CV_8U);
   380              // UV size should be aligned with Y
   381              GAPI_Assert(in_y.size.width == 2 * in_uv.size.width);
   382              GAPI_Assert(in_y.size.height == 2 * in_uv.size.height);
   383              return in_y.withType(CV_8U, 3); // type will be CV_8UC3;
   384          }
   385      };
   386  
        // Further 3-channel color conversions: meta passes through unchanged.
   387      G_TYPED_KERNEL(GRGB2Lab, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2lab") {
   388          static GMatDesc outMeta(GMatDesc in) {
   389              return in; // type still remains CV_8UC3;
   390          }
   391      };
   392  
   393      G_TYPED_KERNEL(GBGR2LUV, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2luv") {
   394          static GMatDesc outMeta(GMatDesc in) {
   395              return in; // type still remains CV_8UC3;
   396          }
   397      };
   398  
   399      G_TYPED_KERNEL(GLUV2BGR, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.luv2bgr") {
   400          static GMatDesc outMeta(GMatDesc in) {
   401              return in; // type still remains CV_8UC3;
   402          }
   403      };
   404  
   405      G_TYPED_KERNEL(GYUV2BGR, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.yuv2bgr") {
   406          static GMatDesc outMeta(GMatDesc in) {
   407              return in; // type still remains CV_8UC3;
   408          }
   409      };
   410  
   411      G_TYPED_KERNEL(GBGR2YUV, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2yuv") {
   412          static GMatDesc outMeta(GMatDesc in) {
   413              return in; // type still remains CV_8UC3;
   414          }
   415      };
   416  
        // Grayscale conversions: output forced to single-channel 8-bit.
   417      G_TYPED_KERNEL(GRGB2Gray, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.rgb2gray") {
   418          static GMatDesc outMeta(GMatDesc in) {
   419              return in.withType(CV_8U, 1);
   420          }
   421      };
   422  
   423      G_TYPED_KERNEL(GRGB2GrayCustom, <GMat(GMat,float,float,float)>, "org.opencv.imgproc.colorconvert.rgb2graycustom") {
   424          static GMatDesc outMeta(GMatDesc in, float, float, float) {
   425              return in.withType(CV_8U, 1);
   426          }
   427      };
   428  
   429      G_TYPED_KERNEL(GBGR2Gray, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2gray") {
   430          static GMatDesc outMeta(GMatDesc in) {
   431              return in.withType(CV_8U, 1);
   432          }
   433      };
   434  
        // Bayer demosaic: single-channel input expands to 8UC3.
   435      G_TYPED_KERNEL(GBayerGR2RGB, <cv::GMat(cv::GMat)>, "org.opencv.imgproc.colorconvert.bayergr2rgb") {
   436          static cv::GMatDesc outMeta(cv::GMatDesc in) {
   437              return in.withType(CV_8U, 3);
   438          }
   439      };
   440  
   441      G_TYPED_KERNEL(GRGB2HSV, <cv::GMat(cv::GMat)>, "org.opencv.imgproc.colorconvert.rgb2hsv") {
   442          static cv::GMatDesc outMeta(cv::GMatDesc in) {
   443              return in;
   444          }
   445      };
   446  
        // RGB -> YUV422: 8UC3 input becomes a 2-channel image of the same size.
   447      G_TYPED_KERNEL(GRGB2YUV422, <cv::GMat(cv::GMat)>, "org.opencv.imgproc.colorconvert.rgb2yuv422") {
   448          static cv::GMatDesc outMeta(cv::GMatDesc in) {
   449              GAPI_Assert(in.depth == CV_8U);
   450              GAPI_Assert(in.chan == 3);
   451              return in.withType(in.depth, 2);
   452          }
   453      };
   454  
        // NV12 -> planar RGB/BGR (GMatP): inputs must be interleaved
        // (planar == false) 8U Y and UV planes with UV at half resolution;
        // output is a planar CV_8UC3 at Y size (asPlanar()).
   455      G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12torgbp") {
   456          static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
   457              GAPI_Assert(inY.depth == CV_8U);
   458              GAPI_Assert(inUV.depth == CV_8U);
   459              GAPI_Assert(inY.chan == 1);
   460              GAPI_Assert(inY.planar == false);
   461              GAPI_Assert(inUV.chan == 2);
   462              GAPI_Assert(inUV.planar == false);
   463              GAPI_Assert(inY.size.width  == 2 * inUV.size.width);
   464              GAPI_Assert(inY.size.height == 2 * inUV.size.height);
   465              return inY.withType(CV_8U, 3).asPlanar();
   466          }
   467      };
   468  
        // NV12 -> grayscale: same input constraints, but the UV plane is only
        // validated — the output is the single-channel 8U image at Y size.
   469      G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12togray") {
   470          static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
   471              GAPI_Assert(inY.depth   == CV_8U);
   472              GAPI_Assert(inUV.depth  == CV_8U);
   473              GAPI_Assert(inY.chan    == 1);
   474              GAPI_Assert(inY.planar  == false);
   475              GAPI_Assert(inUV.chan   == 2);
   476              GAPI_Assert(inUV.planar == false);
   477  
   478              GAPI_Assert(inY.size.width  == 2 * inUV.size.width);
   479              GAPI_Assert(inY.size.height == 2 * inUV.size.height);
   480              return inY.withType(CV_8U, 1);
   481          }
   482      };
   483  
   484      G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12tobgrp") {
   485          static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
   486              GAPI_Assert(inY.depth == CV_8U);
   487              GAPI_Assert(inUV.depth == CV_8U);
   488              GAPI_Assert(inY.chan == 1);
   489              GAPI_Assert(inY.planar == false);
   490              GAPI_Assert(inUV.chan == 2);
   491              GAPI_Assert(inUV.planar == false);
   492              GAPI_Assert(inY.size.width  == 2 * inUV.size.width);
   493              GAPI_Assert(inY.size.height == 2 * inUV.size.height);
   494              return inY.withType(CV_8U, 3).asPlanar();
   495          }
   496      };
   497  
   498  } //namespace imgproc
   499  
   500  //! @addtogroup gapi_filters
   501  //! @{
   502  /** @brief Applies a separable linear filter to a matrix(image).
   503  
   504  The function applies a separable linear filter to the matrix. That is, first, every row of src is
   505  filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D
   506  kernel kernelY. The final result is returned.
   507  
   508  Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
   509  Output image must have the same type, size, and number of channels as the input image.
   510  @note
   511   - In case of floating-point computation, rounding to nearest even is performed
   512  if hardware supports it (if not - to nearest value).
   513   - Function textual ID is "org.opencv.imgproc.filters.sepfilter"
   514  @param src Source image.
   515  @param ddepth desired depth of the destination image (the following combinations of src.depth() and ddepth are supported:
   516  
   517          src.depth() = CV_8U, ddepth = -1/CV_16S/CV_32F/CV_64F
   518          src.depth() = CV_16U/CV_16S, ddepth = -1/CV_32F/CV_64F
   519          src.depth() = CV_32F, ddepth = -1/CV_32F/CV_64F
   520          src.depth() = CV_64F, ddepth = -1/CV_64F
   521  
   522  when ddepth=-1, the output image will have the same depth as the source)
   523  @param kernelX Coefficients for filtering each row.
   524  @param kernelY Coefficients for filtering each column.
   525  @param anchor Anchor position within the kernel. The default value \f$(-1,-1)\f$ means that the anchor
   526  is at the kernel center.
   527  @param delta Value added to the filtered results before storing them.
   528  @param borderType Pixel extrapolation method, see cv::BorderTypes
   529  @param borderValue border value in case of constant border type
   530  @sa  boxFilter, gaussianBlur, medianBlur
   531   */
   532  GAPI_EXPORTS GMat sepFilter(const GMat& src, int ddepth, const Mat& kernelX, const Mat& kernelY, const Point& anchor /*FIXME: = Point(-1,-1)*/,
   533                              const Scalar& delta /*FIXME = GScalar(0)*/, int borderType = BORDER_DEFAULT,
   534                              const Scalar& borderValue = Scalar(0));
   535  
   536  /** @brief Convolves an image with the kernel.
   537  
   538  The function applies an arbitrary linear filter to an image. When
   539  the aperture is partially outside the image, the function interpolates outlier pixel values
   540  according to the specified border mode.
   541  
   542  The function does actually compute correlation, not the convolution:
   543  
   544  \f[\texttt{dst} (x,y) =  \sum _{ \substack{0\leq x' < \texttt{kernel.cols}\\{0\leq y' < \texttt{kernel.rows}}}}  \texttt{kernel} (x',y')* \texttt{src} (x+x'- \texttt{anchor.x} ,y+y'- \texttt{anchor.y} )\f]
   545  
   546  That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip
   547  the kernel using flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -
   548  anchor.y - 1)`.
   549  
   550  Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
   551  Output image must have the same size and number of channels as the input image.
   552  @note
   553   - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   554   - Function textual ID is "org.opencv.imgproc.filters.filter2D"
   555  
   556  @param src input image.
   557  @param ddepth desired depth of the destination image
   558  @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point
   559  matrix; if you want to apply different kernels to different channels, split the image into
   560  separate color planes using split and process them individually.
   561  @param anchor anchor of the kernel that indicates the relative position of a filtered point within
   562  the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
   563  is at the kernel center.
   564  @param delta optional value added to the filtered pixels before storing them in dst.
   565  @param borderType pixel extrapolation method, see cv::BorderTypes
   566  @param borderValue border value in case of constant border type
   567  @sa  sepFilter
   568   */
   569  GAPI_EXPORTS GMat filter2D(const GMat& src, int ddepth, const Mat& kernel, const Point& anchor = Point(-1,-1), const Scalar& delta = Scalar(0),
   570                             int borderType = BORDER_DEFAULT, const Scalar& borderValue = Scalar(0));
   571  
   572  
   573  /** @brief Blurs an image using the box filter.
   574  
   575  The function smooths an image using the kernel:
   576  
   577  \f[\texttt{K} =  \alpha \begin{bmatrix} 1 & 1 & 1 &  \cdots & 1 & 1  \\ 1 & 1 & 1 &  \cdots & 1 & 1  \\ \hdotsfor{6} \\ 1 & 1 & 1 &  \cdots & 1 & 1 \end{bmatrix}\f]
   578  
   579  where
   580  
   581  \f[\alpha = \begin{cases} \frac{1}{\texttt{ksize.width*ksize.height}} & \texttt{when } \texttt{normalize=true}  \\1 & \texttt{otherwise} \end{cases}\f]
   582  
   583  Unnormalized box filter is useful for computing various integral characteristics over each pixel
   584  neighborhood, such as covariance matrices of image derivatives (used in dense optical flow
   585  algorithms, and so on). If you need to compute pixel sums over variable-size windows, use cv::integral.
   586  
   587  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
   588  Output image must have the same type, size, and number of channels as the input image.
   589  @note
   590   - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   591   - Function textual ID is "org.opencv.imgproc.filters.boxfilter"
   592  
   593  @param src Source image.
   594  @param dtype the output image depth (-1 to set the input image data type).
   595  @param ksize blurring kernel size.
   596  @param anchor Anchor position within the kernel. The default value \f$(-1,-1)\f$ means that the anchor
   597  is at the kernel center.
   598  @param normalize flag, specifying whether the kernel is normalized by its area or not.
   599  @param borderType Pixel extrapolation method, see cv::BorderTypes
   600  @param borderValue border value in case of constant border type
   601  @sa  sepFilter, gaussianBlur, medianBlur, integral
   602   */
   603  GAPI_EXPORTS GMat boxFilter(const GMat& src, int dtype, const Size& ksize, const Point& anchor = Point(-1,-1),
   604                              bool normalize = true, int borderType = BORDER_DEFAULT,
   605                              const Scalar& borderValue = Scalar(0));
   606  
   607  /** @brief Blurs an image using the normalized box filter.
   608  
   609  The function smooths an image using the kernel:
   610  
   611  \f[\texttt{K} =  \frac{1}{\texttt{ksize.width*ksize.height}} \begin{bmatrix} 1 & 1 & 1 &  \cdots & 1 & 1  \\ 1 & 1 & 1 &  \cdots & 1 & 1  \\ \hdotsfor{6} \\ 1 & 1 & 1 &  \cdots & 1 & 1  \\ \end{bmatrix}\f]
   612  
   613  The call `blur(src, ksize, anchor, borderType)` is equivalent to `boxFilter(src, src.type(), ksize, anchor,
   614  true, borderType)`.
   615  
   616  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
   617  Output image must have the same type, size, and number of channels as the input image.
   618  @note
   619   - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   620   - Function textual ID is "org.opencv.imgproc.filters.blur"
   621  
   622  @param src Source image.
   623  @param ksize blurring kernel size.
   624  @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
   625  center.
   626  @param borderType border mode used to extrapolate pixels outside of the image, see cv::BorderTypes
   627  @param borderValue border value in case of constant border type
   628  @sa  boxFilter, bilateralFilter, GaussianBlur, medianBlur
   629   */
   630  GAPI_EXPORTS GMat blur(const GMat& src, const Size& ksize, const Point& anchor = Point(-1,-1),
   631                         int borderType = BORDER_DEFAULT, const Scalar& borderValue = Scalar(0));
   632  
   633  
   634  //GAPI_EXPORTS_W void blur( InputArray src, OutputArray dst,
   635   //                       Size ksize, Point anchor = Point(-1,-1),
   636   //                       int borderType = BORDER_DEFAULT );
   637  
   638  
   639  /** @brief Blurs an image using a Gaussian filter.
   640  
The function convolves the source image with the specified Gaussian kernel.
Output image must have the same type and number of channels as the input image.
   643  
   644  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
   645  Output image must have the same type, size, and number of channels as the input image.
   646  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   648   - Function textual ID is "org.opencv.imgproc.filters.gaussianBlur"
   649  
   650  @param src input image;
   651  @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be
   652  positive and odd. Or, they can be zero's and then they are computed from sigma.
   653  @param sigmaX Gaussian kernel standard deviation in X direction.
   654  @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be
   655  equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,
   656  respectively (see cv::getGaussianKernel for details); to fully control the result regardless of
   657  possible future modifications of all this semantics, it is recommended to specify all of ksize,
   658  sigmaX, and sigmaY.
   659  @param borderType pixel extrapolation method, see cv::BorderTypes
   660  @param borderValue border value in case of constant border type
   661  @sa  sepFilter, boxFilter, medianBlur
   662   */
   663  GAPI_EXPORTS GMat gaussianBlur(const GMat& src, const Size& ksize, double sigmaX, double sigmaY = 0,
   664                                 int borderType = BORDER_DEFAULT, const Scalar& borderValue = Scalar(0));
   665  
   666  /** @brief Blurs an image using the median filter.
   667  
The function smooths an image using the median filter with the \f$\texttt{ksize} \times
   669  \texttt{ksize}\f$ aperture. Each channel of a multi-channel image is processed independently.
   670  Output image must have the same type, size, and number of channels as the input image.
   671  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   673  The median filter uses cv::BORDER_REPLICATE internally to cope with border pixels, see cv::BorderTypes
   674   - Function textual ID is "org.opencv.imgproc.filters.medianBlur"
   675  
   676  @param src input matrix (image)
   677  @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...
   678  @sa  boxFilter, gaussianBlur
   679   */
   680  GAPI_EXPORTS_W GMat medianBlur(const GMat& src, int ksize);
   681  
   682  /** @brief Erodes an image by using a specific structuring element.
   683  
   684  The function erodes the source image using the specified structuring element that determines the
   685  shape of a pixel neighborhood over which the minimum is taken:
   686  
   687  \f[\texttt{dst} (x,y) =  \min _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\f]
   688  
   689  Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
   690  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
   691  Output image must have the same type, size, and number of channels as the input image.
   692  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   694   - Function textual ID is "org.opencv.imgproc.filters.erode"
   695  
   696  @param src input image
   697  @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular
   698  structuring element is used. Kernel can be created using getStructuringElement.
   699  @param anchor position of the anchor within the element; default value (-1, -1) means that the
   700  anchor is at the element center.
   701  @param iterations number of times erosion is applied.
   702  @param borderType pixel extrapolation method, see cv::BorderTypes
   703  @param borderValue border value in case of a constant border
   704  @sa  dilate, morphologyEx
   705   */
   706  GAPI_EXPORTS GMat erode(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1,
   707                          int borderType = BORDER_CONSTANT,
   708                          const  Scalar& borderValue = morphologyDefaultBorderValue());
   709  
   710  /** @brief Erodes an image by using 3 by 3 rectangular structuring element.
   711  
   712  The function erodes the source image using the rectangular structuring element with rectangle center as an anchor.
   713  Erosion can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
   714  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
   715  Output image must have the same type, size, and number of channels as the input image.
   716  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   718   - Function textual ID is "org.opencv.imgproc.filters.erode"
   719  
   720  @param src input image
   721  @param iterations number of times erosion is applied.
   722  @param borderType pixel extrapolation method, see cv::BorderTypes
   723  @param borderValue border value in case of a constant border
   724  @sa  erode, dilate3x3
   725   */
   726  GAPI_EXPORTS GMat erode3x3(const GMat& src, int iterations = 1,
   727                             int borderType = BORDER_CONSTANT,
   728                             const  Scalar& borderValue = morphologyDefaultBorderValue());
   729  
   730  /** @brief Dilates an image by using a specific structuring element.
   731  
   732  The function dilates the source image using the specified structuring element that determines the
   733  shape of a pixel neighborhood over which the maximum is taken:
   734  \f[\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\f]
   735  
   736  Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
   737  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
   738  Output image must have the same type, size, and number of channels as the input image.
   739  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   741   - Function textual ID is "org.opencv.imgproc.filters.dilate"
   742  
   743  @param src input image.
@param kernel structuring element used for dilation; if `element=Mat()`, a `3 x 3` rectangular
   745  structuring element is used. Kernel can be created using getStructuringElement
   746  @param anchor position of the anchor within the element; default value (-1, -1) means that the
   747  anchor is at the element center.
   748  @param iterations number of times dilation is applied.
   749  @param borderType pixel extrapolation method, see cv::BorderTypes
   750  @param borderValue border value in case of a constant border
   751  @sa  erode, morphologyEx, getStructuringElement
   752   */
   753  GAPI_EXPORTS GMat dilate(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1,
   754                           int borderType = BORDER_CONSTANT,
   755                           const  Scalar& borderValue = morphologyDefaultBorderValue());
   756  
   757  /** @brief Dilates an image by using 3 by 3 rectangular structuring element.
   758  
   759  The function dilates the source image using the specified structuring element that determines the
   760  shape of a pixel neighborhood over which the maximum is taken:
   761  \f[\texttt{dst} (x,y) =  \max _{(x',y'):  \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\f]
   762  
   763  Dilation can be applied several (iterations) times. In case of multi-channel images, each channel is processed independently.
   764  Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, and @ref CV_32FC1.
   765  Output image must have the same type, size, and number of channels as the input image.
   766  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   768   - Function textual ID is "org.opencv.imgproc.filters.dilate"
   769  
   770  @param src input image.
   771  @param iterations number of times dilation is applied.
   772  @param borderType pixel extrapolation method, see cv::BorderTypes
   773  @param borderValue border value in case of a constant border
   774  @sa  dilate, erode3x3
   775   */
   776  
   777  GAPI_EXPORTS GMat dilate3x3(const GMat& src, int iterations = 1,
   778                              int borderType = BORDER_CONSTANT,
   779                              const  Scalar& borderValue = morphologyDefaultBorderValue());
   780  
   781  /** @brief Performs advanced morphological transformations.
   782  
   783  The function can perform advanced morphological transformations using an erosion and dilation as
   784  basic operations.
   785  
   786  Any of the operations can be done in-place. In case of multi-channel images, each channel is
   787  processed independently.
   788  
   789  @note
   790   - Function textual ID is "org.opencv.imgproc.filters.morphologyEx"
   791   - The number of iterations is the number of times erosion or dilatation operation will be
   792  applied. For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to
   793  apply successively: erode -> erode -> dilate -> dilate
   794  (and not erode -> dilate -> erode -> dilate).
   795  
   796  @param src Input image.
   797  @param op Type of a morphological operation, see #MorphTypes
   798  @param kernel Structuring element. It can be created using #getStructuringElement.
   799  @param anchor Anchor position within the element. Both negative values mean that the anchor is at
   800  the kernel center.
   801  @param iterations Number of times erosion and dilation are applied.
   802  @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
   803  @param borderValue Border value in case of a constant border. The default value has a special
   804  meaning.
   805  @sa  dilate, erode, getStructuringElement
   806   */
   807  GAPI_EXPORTS GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel,
   808                                 const Point       &anchor      = Point(-1,-1),
   809                                 const int          iterations  = 1,
   810                                 const BorderTypes  borderType  = BORDER_CONSTANT,
   811                                 const Scalar      &borderValue = morphologyDefaultBorderValue());
   812  
   813  /** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
   814  
   815  In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to
   816  calculate the derivative. When \f$\texttt{ksize = 1}\f$, the \f$3 \times 1\f$ or \f$1 \times 3\f$
   817  kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
   818  or the second x- or y- derivatives.
   819  
   820  There is also the special value `ksize = FILTER_SCHARR (-1)` that corresponds to the \f$3\times3\f$ Scharr
   821  filter that may give more accurate results than the \f$3\times3\f$ Sobel. The Scharr aperture is
   822  
   823  \f[\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\f]
   824  
   825  for the x-derivative, or transposed for the y-derivative.
   826  
   827  The function calculates an image derivative by convolving the image with the appropriate kernel:
   828  
   829  \f[\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\f]
   830  
   831  The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
   832  resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
   833  or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
   834  case corresponds to a kernel of:
   835  
   836  \f[\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\f]
   837  
   838  The second case corresponds to a kernel of:
   839  
   840  \f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f]
   841  
   842  @note
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   844   - Function textual ID is "org.opencv.imgproc.filters.sobel"
   845  
   846  @param src input image.
   847  @param ddepth output image depth, see @ref filter_depths "combinations"; in the case of
   848      8-bit input images it will result in truncated derivatives.
   849  @param dx order of the derivative x.
   850  @param dy order of the derivative y.
   851  @param ksize size of the extended Sobel kernel; it must be odd.
   852  @param scale optional scale factor for the computed derivative values; by default, no scaling is
   853  applied (see cv::getDerivKernels for details).
   854  @param delta optional delta value that is added to the results prior to storing them in dst.
   855  @param borderType pixel extrapolation method, see cv::BorderTypes
   856  @param borderValue border value in case of constant border type
   857  @sa filter2D, gaussianBlur, cartToPolar
   858   */
   859  GAPI_EXPORTS GMat Sobel(const GMat& src, int ddepth, int dx, int dy, int ksize = 3,
   860                          double scale = 1, double delta = 0,
   861                          int borderType = BORDER_DEFAULT,
   862                          const Scalar& borderValue = Scalar(0));
   863  
   864  /** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
   865  
   866  In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to
   867  calculate the derivative. When \f$\texttt{ksize = 1}\f$, the \f$3 \times 1\f$ or \f$1 \times 3\f$
   868  kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first
   869  or the second x- or y- derivatives.
   870  
   871  There is also the special value `ksize = FILTER_SCHARR (-1)` that corresponds to the \f$3\times3\f$ Scharr
   872  filter that may give more accurate results than the \f$3\times3\f$ Sobel. The Scharr aperture is
   873  
   874  \f[\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\f]
   875  
   876  for the x-derivative, or transposed for the y-derivative.
   877  
   878  The function calculates an image derivative by convolving the image with the appropriate kernel:
   879  
   880  \f[\texttt{dst} =  \frac{\partial^{xorder+yorder} \texttt{src}}{\partial x^{xorder} \partial y^{yorder}}\f]
   881  
   882  The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less
   883  resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)
   884  or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first
   885  case corresponds to a kernel of:
   886  
   887  \f[\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\f]
   888  
   889  The second case corresponds to a kernel of:
   890  
   891  \f[\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\f]
   892  
   893  @note
 - First returned matrix corresponds to dx derivative while the second one to dy.
 - Rounding to nearest even is performed if hardware supports it, if not - to nearest.
   896   - Function textual ID is "org.opencv.imgproc.filters.sobelxy"
   897  
   898  @param src input image.
   899  @param ddepth output image depth, see @ref filter_depths "combinations"; in the case of
   900      8-bit input images it will result in truncated derivatives.
   901  @param order order of the derivatives.
   902  @param ksize size of the extended Sobel kernel; it must be odd.
   903  @param scale optional scale factor for the computed derivative values; by default, no scaling is
   904  applied (see cv::getDerivKernels for details).
   905  @param delta optional delta value that is added to the results prior to storing them in dst.
   906  @param borderType pixel extrapolation method, see cv::BorderTypes
   907  @param borderValue border value in case of constant border type
   908  @sa filter2D, gaussianBlur, cartToPolar
   909   */
   910  GAPI_EXPORTS std::tuple<GMat, GMat> SobelXY(const GMat& src, int ddepth, int order, int ksize = 3,
   911                          double scale = 1, double delta = 0,
   912                          int borderType = BORDER_DEFAULT,
   913                          const Scalar& borderValue = Scalar(0));
   914  
   915  /** @brief Calculates the Laplacian of an image.
   916  
   917  The function calculates the Laplacian of the source image by adding up the second x and y
   918  derivatives calculated using the Sobel operator:
   919  
   920  \f[\texttt{dst} =  \Delta \texttt{src} =  \frac{\partial^2 \texttt{src}}{\partial x^2} +  \frac{\partial^2 \texttt{src}}{\partial y^2}\f]
   921  
   922  This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image
   923  with the following \f$3 \times 3\f$ aperture:
   924  
   925  \f[\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\f]
   926  
   927  @note Function textual ID is "org.opencv.imgproc.filters.laplacian"
   928  
   929  @param src Source image.
   930  @param ddepth Desired depth of the destination image.
   931  @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for
   932  details. The size must be positive and odd.
   933  @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
   934  applied. See #getDerivKernels for details.
   935  @param delta Optional delta value that is added to the results prior to storing them in dst .
   936  @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
   937  @return Destination image of the same size and the same number of channels as src.
   938  @sa  Sobel, Scharr
   939   */
   940  GAPI_EXPORTS GMat Laplacian(const GMat& src, int ddepth, int ksize = 1,
   941                              double scale = 1, double delta = 0, int borderType = BORDER_DEFAULT);
   942  
   943  /** @brief Applies the bilateral filter to an image.
   944  
   945  The function applies bilateral filtering to the input image, as described in
   946  http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html
   947  bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is
   948  very slow compared to most filters.
   949  
   950  _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\<
   951  10), the filter will not have much effect, whereas if they are large (\> 150), they will have a very
   952  strong effect, making the image look "cartoonish".
   953  
   954  _Filter size_: Large filters (d \> 5) are very slow, so it is recommended to use d=5 for real-time
   955  applications, and perhaps d=9 for offline applications that need heavy noise filtering.
   956  
   957  This filter does not work inplace.
   958  
   959  @note Function textual ID is "org.opencv.imgproc.filters.bilateralfilter"
   960  
   961  @param src Source 8-bit or floating-point, 1-channel or 3-channel image.
   962  @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
   963  it is computed from sigmaSpace.
   964  @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that
   965  farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting
   966  in larger areas of semi-equal color.
   967  @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that
   968  farther pixels will influence each other as long as their colors are close enough (see sigmaColor
   969  ). When d\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is
   970  proportional to sigmaSpace.
   971  @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
   972  @return Destination image of the same size and type as src.
   973   */
   974  GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace,
   975                                    int borderType = BORDER_DEFAULT);
   976  
   977  //! @} gapi_filters
   978  
   979  //! @addtogroup gapi_feature
   980  //! @{
   981  /** @brief Finds edges in an image using the Canny algorithm.
   982  
   983  The function finds edges in the input image and marks them in the output map edges using the
   984  Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The
   985  largest value is used to find initial segments of strong edges. See
   986  <http://en.wikipedia.org/wiki/Canny_edge_detector>
   987  
   988  @note Function textual ID is "org.opencv.imgproc.feature.canny"
   989  
   990  @param image 8-bit input image.
   991  @param threshold1 first threshold for the hysteresis procedure.
   992  @param threshold2 second threshold for the hysteresis procedure.
   993  @param apertureSize aperture size for the Sobel operator.
   994  @param L2gradient a flag, indicating whether a more accurate \f$L_2\f$ norm
   995  \f$=\sqrt{(dI/dx)^2 + (dI/dy)^2}\f$ should be used to calculate the image gradient magnitude (
   996  L2gradient=true ), or whether the default \f$L_1\f$ norm \f$=|dI/dx|+|dI/dy|\f$ is enough (
   997  L2gradient=false ).
   998   */
   999  GAPI_EXPORTS GMat Canny(const GMat& image, double threshold1, double threshold2,
  1000                          int apertureSize = 3, bool L2gradient = false);
  1001  
  1002  /** @brief Determines strong corners on an image.
  1003  
  1004  The function finds the most prominent corners in the image or in the specified image region, as
  1005  described in @cite Shi94
  1006  
  1007  -   Function calculates the corner quality measure at every source image pixel using the
  1008      #cornerMinEigenVal or #cornerHarris .
  1009  -   Function performs a non-maximum suppression (the local maximums in *3 x 3* neighborhood are
  1010      retained).
  1011  -   The corners with the minimal eigenvalue less than
  1012      \f$\texttt{qualityLevel} \cdot \max_{x,y} qualityMeasureMap(x,y)\f$ are rejected.
  1013  -   The remaining corners are sorted by the quality measure in the descending order.
  1014  -   Function throws away each corner for which there is a stronger corner at a distance less than
  1015      maxDistance.
  1016  
  1017  The function can be used to initialize a point-based tracker of an object.
  1018  
  1019  @note
  1020   - If the function is called with different values A and B of the parameter qualityLevel , and
  1021  A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
  1022  with qualityLevel=B .
  1023   - Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack"
  1024  
  1025  @param image Input 8-bit or floating-point 32-bit, single-channel image.
  1026  @param maxCorners Maximum number of corners to return. If there are more corners than are found,
  1027  the strongest of them is returned. `maxCorners <= 0` implies that no limit on the maximum is set
  1028  and all detected corners are returned.
  1029  @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The
  1030  parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue
  1031  (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the
  1032  quality measure less than the product are rejected. For example, if the best corner has the
  1033  quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure
  1034  less than 15 are rejected.
  1035  @param minDistance Minimum possible Euclidean distance between the returned corners.
  1036  @param mask Optional region of interest. If the image is not empty (it needs to have the type
  1037  CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.
  1038  @param blockSize Size of an average block for computing a derivative covariation matrix over each
  1039  pixel neighborhood. See cornerEigenValsAndVecs .
  1040  @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)
  1041  or #cornerMinEigenVal.
  1042  @param k Free parameter of the Harris detector.
  1043  
  1044  @return vector of detected corners.
  1045   */
  1046  GAPI_EXPORTS_W GArray<Point2f> goodFeaturesToTrack(const GMat  &image,
  1047                                                         int    maxCorners,
  1048                                                         double qualityLevel,
  1049                                                         double minDistance,
  1050                                                   const Mat   &mask = Mat(),
  1051                                                         int    blockSize = 3,
  1052                                                         bool   useHarrisDetector = false,
  1053                                                         double k = 0.04);
  1054  
//! @} gapi_feature

/** @brief Equalizes the histogram of a grayscale image.

  1059  The function equalizes the histogram of the input image using the following algorithm:
  1060  
  1061  - Calculate the histogram \f$H\f$ for src .
  1062  - Normalize the histogram so that the sum of histogram bins is 255.
  1063  - Compute the integral of the histogram:
  1064  \f[H'_i =  \sum _{0  \le j < i} H(j)\f]
  1065  - Transform the image using \f$H'\f$ as a look-up table: \f$\texttt{dst}(x,y) = H'(\texttt{src}(x,y))\f$
  1066  
  1067  The algorithm normalizes the brightness and increases the contrast of the image.
  1068  @note
  1069   - The returned image is of the same size and type as input.
  1070   - Function textual ID is "org.opencv.imgproc.equalizeHist"
  1071  
  1072  @param src Source 8-bit single channel image.
  1073   */
  1074  GAPI_EXPORTS GMat equalizeHist(const GMat& src);
  1075  
  1076  //! @addtogroup gapi_shape
  1077  //! @{
  1078  /** @brief Finds contours in a binary image.
  1079  
  1080  The function retrieves contours from the binary image using the algorithm @cite Suzuki85 .
  1081  The contours are a useful tool for shape analysis and object detection and recognition.
  1082  See squares.cpp in the OpenCV sample directory.
  1083  
  1084  @note Function textual ID is "org.opencv.imgproc.shape.findContours"
  1085  
  1086  @param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
  1087  pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
  1088  #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
  1089  If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
  1090  image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then @ref CV_32SC1 is supported only.
  1091  @param mode Contour retrieval mode, see #RetrievalModes
  1092  @param method Contour approximation method, see #ContourApproximationModes
  1093  @param offset Optional offset by which every contour point is shifted. This is useful if the
  1094  contours are extracted from the image ROI and then they should be analyzed in the whole image
  1095  context.
  1096  
  1097  @return GArray of detected contours. Each contour is stored as a GArray of points.
  1098   */
  1099  GAPI_EXPORTS GArray<GArray<Point>>
  1100  findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
  1101               const GOpaque<Point> &offset);
  1102  
  1103  // FIXME oc: make default value offset = Point()
  1104  /** @overload
  1105  @note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset"
  1106   */
  1107  GAPI_EXPORTS GArray<GArray<Point>>
  1108  findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
  1109  
  1110  /** @brief Finds contours and their hierarchy in a binary image.
  1111  
  1112  The function retrieves contours from the binary image using the algorithm @cite Suzuki85
  1113  and calculates their hierarchy.
  1114  The contours are a useful tool for shape analysis and object detection and recognition.
  1115  See squares.cpp in the OpenCV sample directory.
  1116  
  1117  @note Function textual ID is "org.opencv.imgproc.shape.findContoursH"
  1118  
  1119  @param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
  1120  pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
  1121  #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
  1122  If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then only @ref CV_32SC1 is supported.
  1124  @param mode Contour retrieval mode, see #RetrievalModes
  1125  @param method Contour approximation method, see #ContourApproximationModes
  1126  @param offset Optional offset by which every contour point is shifted. This is useful if the
  1127  contours are extracted from the image ROI and then they should be analyzed in the whole image
  1128  context.
  1129  
  1130  @return
  1131   - GArray of detected contours. Each contour is stored as a GArray of points.
  1132   - Optional output GArray of cv::Vec4i, containing information about the image topology.
  1133  It has as many elements as the number of contours. For each i-th contour contours[i], the elements
  1134  hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based
  1135  indices in contours of the next and previous contours at the same hierarchical level, the first
  1136  child contour and the parent contour, respectively. If for the contour i there are no next,
  1137  previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
  1138   */
  1139  GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
  1140  findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
  1141                const GOpaque<Point> &offset);
  1142  
  1143  // FIXME oc: make default value offset = Point()
  1144  /** @overload
  1145  @note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset"
  1146   */
  1147  GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
  1148  findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
  1149  
  1150  /** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels
  1151  of gray-scale image.
  1152  
  1153  The function calculates and returns the minimal up-right bounding rectangle for the specified
  1154  point set or non-zero pixels of gray-scale image.
  1155  
  1156  @note
  1157   - Function textual ID is "org.opencv.imgproc.shape.boundingRectMat"
  1158   - In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column
  1159  if there are 2 channels, or have 2 columns if there is a single channel. Mat should have either
  1160  @ref CV_32S or @ref CV_32F depth
  1161  
  1162  @param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
  1163  2D points stored in Mat.
  1164   */
  1165  GAPI_EXPORTS_W GOpaque<Rect> boundingRect(const GMat& src);
  1166  
  1167  /** @overload
  1168  
  1169  Calculates the up-right bounding rectangle of a point set.
  1170  
  1171  @note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S"
  1172  
  1173  @param src Input 2D point set, stored in std::vector<cv::Point2i>.
  1174   */
  1175  GAPI_EXPORTS_W GOpaque<Rect> boundingRect(const GArray<Point2i>& src);
  1176  
  1177  /** @overload
  1178  
  1179  Calculates the up-right bounding rectangle of a point set.
  1180  
  1181  @note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F"
  1182  
  1183  @param src Input 2D point set, stored in std::vector<cv::Point2f>.
  1184   */
  1185  GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2f>& src);
  1186  
  1187  /** @brief Fits a line to a 2D point set.
  1188  
  1189  The function fits a line to a 2D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
  1190  \f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance
  1191  function, one of the following:
  1192  -  DIST_L2
  1193  \f[\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\f]
  1194  - DIST_L1
  1195  \f[\rho (r) = r\f]
  1196  - DIST_L12
  1197  \f[\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
  1198  - DIST_FAIR
  1199  \f[\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\f]
  1200  - DIST_WELSCH
  1201  \f[\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\f]
  1202  - DIST_HUBER
  1203  \f[\rho (r) =  \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
  1204  
  1205  The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
  1206  that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
  1207  weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
  1208  
  1209  @note
  1210   - Function textual ID is "org.opencv.imgproc.shape.fitLine2DMat"
  1211   - In case of an N-dimensional points' set given, Mat should be 2-dimensional, have a single row
  1212  or column if there are N channels, or have N columns if there is a single channel.
  1213  
  1214  @param src Input set of 2D points stored in one of possible containers: Mat,
  1215  std::vector<cv::Point2i>, std::vector<cv::Point2f>, std::vector<cv::Point2d>.
  1216  @param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
  1217  and @ref DIST_C are not supported.
  1218  @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
  1219  is chosen.
  1220  @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
  1221  line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
  1222  @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
  1223  If it is 0, a default value is chosen.
  1224  
  1225  @return Output line parameters: a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0),
  1226  where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line.
  1227   */
  1228  GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GMat& src, const DistanceTypes distType,
  1229                                        const double param = 0., const double reps = 0.,
  1230                                        const double aeps = 0.);
  1231  
  1232  /** @overload
  1233  
  1234  @note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32S"
  1235  
  1236   */
  1237  GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2i>& src, const DistanceTypes distType,
  1238                                        const double param = 0., const double reps = 0.,
  1239                                        const double aeps = 0.);
  1240  
  1241  /** @overload
  1242  
  1243  @note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32F"
  1244  
  1245   */
  1246  GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2f>& src, const DistanceTypes distType,
  1247                                        const double param = 0., const double reps = 0.,
  1248                                        const double aeps = 0.);
  1249  
  1250  /** @overload
  1251  
  1252  @note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector64F"
  1253  
  1254   */
  1255  GAPI_EXPORTS GOpaque<Vec4f> fitLine2D(const GArray<Point2d>& src, const DistanceTypes distType,
  1256                                        const double param = 0., const double reps = 0.,
  1257                                        const double aeps = 0.);
  1258  
  1259  /** @brief Fits a line to a 3D point set.
  1260  
  1261  The function fits a line to a 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where
  1262  \f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance
  1263  function, one of the following:
  1264  -  DIST_L2
  1265  \f[\rho (r) = r^2/2  \quad \text{(the simplest and the fastest least-squares method)}\f]
  1266  - DIST_L1
  1267  \f[\rho (r) = r\f]
  1268  - DIST_L12
  1269  \f[\rho (r) = 2  \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f]
  1270  - DIST_FAIR
  1271  \f[\rho \left (r \right ) = C^2  \cdot \left (  \frac{r}{C} -  \log{\left(1 + \frac{r}{C}\right)} \right )  \quad \text{where} \quad C=1.3998\f]
  1272  - DIST_WELSCH
  1273  \f[\rho \left (r \right ) =  \frac{C^2}{2} \cdot \left ( 1 -  \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right )  \quad \text{where} \quad C=2.9846\f]
  1274  - DIST_HUBER
  1275  \f[\rho (r) =  \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f]
  1276  
  1277  The algorithm is based on the M-estimator ( <http://en.wikipedia.org/wiki/M-estimator> ) technique
  1278  that iteratively fits the line using the weighted least-squares algorithm. After each iteration the
  1279  weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ .
  1280  
  1281  @note
  1282   - Function textual ID is "org.opencv.imgproc.shape.fitLine3DMat"
  1283   - In case of an N-dimensional points' set given, Mat should be 2-dimensional, have a single row
  1284  or column if there are N channels, or have N columns if there is a single channel.
  1285  
  1286  @param src Input set of 3D points stored in one of possible containers: Mat,
  1287  std::vector<cv::Point3i>, std::vector<cv::Point3f>, std::vector<cv::Point3d>.
  1288  @param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER
  1289  and @ref DIST_C are not supported.
  1290  @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value
  1291  is chosen.
  1292  @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the
  1293  line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen.
  1294  @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps.
  1295  If it is 0, a default value is chosen.
  1296  
  1297  @return Output line parameters: a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0),
  1298  where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on
  1299  the line.
  1300   */
  1301  GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GMat& src, const DistanceTypes distType,
  1302                                        const double param = 0., const double reps = 0.,
  1303                                        const double aeps = 0.);
  1304  
  1305  /** @overload
  1306  
  1307  @note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32S"
  1308  
  1309   */
  1310  GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3i>& src, const DistanceTypes distType,
  1311                                        const double param = 0., const double reps = 0.,
  1312                                        const double aeps = 0.);
  1313  
  1314  /** @overload
  1315  
  1316  @note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32F"
  1317  
  1318   */
  1319  GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3f>& src, const DistanceTypes distType,
  1320                                        const double param = 0., const double reps = 0.,
  1321                                        const double aeps = 0.);
  1322  
  1323  /** @overload
  1324  
  1325  @note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector64F"
  1326  
  1327   */
  1328  GAPI_EXPORTS GOpaque<Vec6f> fitLine3D(const GArray<Point3d>& src, const DistanceTypes distType,
  1329                                        const double param = 0., const double reps = 0.,
  1330                                        const double aeps = 0.);
  1331  
  1332  //! @} gapi_shape
  1333  
  1334  //! @addtogroup gapi_colorconvert
  1335  //! @{
  1336  /** @brief Converts an image from BGR color space to RGB color space.
  1337  
  1338  The function converts an input image from BGR color space to RGB.
  1339  The conventional ranges for B, G, and R channel values are 0 to 255.
  1340  
  1341  Output image is 8-bit unsigned 3-channel image @ref CV_8UC3.
  1342  
  1343  @note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2rgb"
  1344  
  1345  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1346  @sa RGB2BGR
  1347  */
  1348  GAPI_EXPORTS_W GMat BGR2RGB(const GMat& src);
  1349  
  1350  /** @brief Converts an image from RGB color space to gray-scaled.
  1351  
  1352  The conventional ranges for R, G, and B channel values are 0 to 255.
  1353  Resulting gray color value computed as
  1354  \f[\texttt{dst} (I)= \texttt{0.299} * \texttt{src}(I).R + \texttt{0.587} * \texttt{src}(I).G  + \texttt{0.114} * \texttt{src}(I).B \f]
  1355  
  1356  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2gray"
  1357  
  1358  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1359  @sa RGB2YUV
  1360   */
  1361  GAPI_EXPORTS_W GMat RGB2Gray(const GMat& src);
  1362  
  1363  /** @overload
  1364  Resulting gray color value computed as
  1365  \f[\texttt{dst} (I)= \texttt{rY} * \texttt{src}(I).R + \texttt{gY} * \texttt{src}(I).G  + \texttt{bY} * \texttt{src}(I).B \f]
  1366  
  1367  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2graycustom"
  1368  
  1369  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1370  @param rY float multiplier for R channel.
  1371  @param gY float multiplier for G channel.
  1372  @param bY float multiplier for B channel.
  1373  @sa RGB2YUV
  1374   */
  1375  GAPI_EXPORTS GMat RGB2Gray(const GMat& src, float rY, float gY, float bY);
  1376  
  1377  /** @brief Converts an image from BGR color space to gray-scaled.
  1378  
  1379  The conventional ranges for B, G, and R channel values are 0 to 255.
  1380  Resulting gray color value computed as
  1381  \f[\texttt{dst} (I)= \texttt{0.114} * \texttt{src}(I).B + \texttt{0.587} * \texttt{src}(I).G  + \texttt{0.299} * \texttt{src}(I).R \f]
  1382  
  1383  @note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2gray"
  1384  
  1385  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1386  @sa BGR2LUV
  1387   */
  1388  GAPI_EXPORTS GMat BGR2Gray(const GMat& src);
  1389  
  1390  /** @brief Converts an image from RGB color space to YUV color space.
  1391  
  1392  The function converts an input image from RGB color space to YUV.
  1393  The conventional ranges for R, G, and B channel values are 0 to 255.
  1394  
  1395  In case of linear transformations, the range does not matter. But in case of a non-linear
  1396  transformation, an input RGB image should be normalized to the proper value range to get the correct
  1397  results, like here, at RGB \f$\rightarrow\f$ Y\*u\*v\* transformation.
  1398  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1399  
  1400  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2yuv"
  1401  
  1402  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1403  @sa YUV2RGB, RGB2Lab
  1404  */
  1405  GAPI_EXPORTS GMat RGB2YUV(const GMat& src);
  1406  
  1407  /** @brief Converts an image from BGR color space to I420 color space.
  1408  
  1409  The function converts an input image from BGR color space to I420.
  1410  The conventional ranges for R, G, and B channel values are 0 to 255.
  1411  
  1412  Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1.
  1413  Width of I420 output image must be the same as width of input image.
  1414  Height of the I420 output image must be equal to 3/2 of the height of the input image.
  1415  
  1416  @note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2i420"
  1417  
  1418  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1419  @sa I4202BGR
  1420  */
  1421  GAPI_EXPORTS GMat BGR2I420(const GMat& src);
  1422  
  1423  /** @brief Converts an image from RGB color space to I420 color space.
  1424  
  1425  The function converts an input image from RGB color space to I420.
  1426  The conventional ranges for R, G, and B channel values are 0 to 255.
  1427  
  1428  Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1.
  1429  Width of I420 output image must be the same as width of input image.
  1430  Height of the I420 output image must be equal to 3/2 of the height of the input image.
  1431  
  1432  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2i420"
  1433  
  1434  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1435  @sa I4202RGB
  1436  */
  1437  GAPI_EXPORTS GMat RGB2I420(const GMat& src);
  1438  
  1439  /** @brief Converts an image from I420 color space to BGR color space.
  1440  
  1441  The function converts an input image from I420 color space to BGR.
  1442  The conventional ranges for B, G, and R channel values are 0 to 255.
  1443  
  1444  Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3.
  1445  Width of BGR output image must be the same as width of input image.
  1446  Height of the BGR output image must be equal to 2/3 of the height of the input image.
  1447  
  1448  @note Function textual ID is "org.opencv.imgproc.colorconvert.i4202bgr"
  1449  
  1450  @param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1451  @sa BGR2I420
  1452  */
  1453  GAPI_EXPORTS GMat I4202BGR(const GMat& src);
  1454  
  1455  /** @brief Converts an image from I420 color space to RGB color space.
  1456  
  1457  The function converts an input image from I420 color space to RGB.
  1458  The conventional ranges for B, G, and R channel values are 0 to 255.
  1459  
  1460  Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3.
  1461  Width of RGB output image must be the same as width of input image.
  1462  Height of the RGB output image must be equal to 2/3 of the height of the input image.
  1463  
  1464  @note Function textual ID is "org.opencv.imgproc.colorconvert.i4202rgb"
  1465  
  1466  @param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1467  @sa RGB2I420
  1468  */
  1469  GAPI_EXPORTS GMat I4202RGB(const GMat& src);
  1470  
  1471  /** @brief Converts an image from BGR color space to LUV color space.
  1472  
  1473  The function converts an input image from BGR color space to LUV.
  1474  The conventional ranges for B, G, and R channel values are 0 to 255.
  1475  
  1476  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1477  
  1478  @note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2luv"
  1479  
  1480  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1481  @sa RGB2Lab, RGB2LUV
  1482  */
  1483  GAPI_EXPORTS GMat BGR2LUV(const GMat& src);
  1484  
  1485  /** @brief Converts an image from LUV color space to BGR color space.
  1486  
  1487  The function converts an input image from LUV color space to BGR.
  1488  The conventional ranges for B, G, and R channel values are 0 to 255.
  1489  
  1490  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1491  
  1492  @note Function textual ID is "org.opencv.imgproc.colorconvert.luv2bgr"
  1493  
  1494  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1495  @sa BGR2LUV
  1496  */
  1497  GAPI_EXPORTS GMat LUV2BGR(const GMat& src);
  1498  
  1499  /** @brief Converts an image from YUV color space to BGR color space.
  1500  
  1501  The function converts an input image from YUV color space to BGR.
  1502  The conventional ranges for B, G, and R channel values are 0 to 255.
  1503  
  1504  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1505  
  1506  @note Function textual ID is "org.opencv.imgproc.colorconvert.yuv2bgr"
  1507  
  1508  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1509  @sa BGR2YUV
  1510  */
  1511  GAPI_EXPORTS GMat YUV2BGR(const GMat& src);
  1512  
  1513  /** @brief Converts an image from BGR color space to YUV color space.
  1514  
  1515  The function converts an input image from BGR color space to YUV.
  1516  The conventional ranges for B, G, and R channel values are 0 to 255.
  1517  
  1518  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1519  
  1520  @note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2yuv"
  1521  
  1522  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1523  @sa YUV2BGR
  1524  */
  1525  GAPI_EXPORTS GMat BGR2YUV(const GMat& src);
  1526  
  1527  /** @brief Converts an image from RGB color space to Lab color space.
  1528  
  1529  The function converts an input image from RGB color space to Lab.
  1530  The conventional ranges for R, G, and B channel values are 0 to 255.
  1531  
  1532  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1533  
  1534  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2lab"
  1535  
  1536  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1537  @sa RGB2YUV, RGB2LUV
  1538  */
  1539  GAPI_EXPORTS GMat RGB2Lab(const GMat& src);
  1540  
  1541  /** @brief Converts an image from YUV color space to RGB.
  1542  The function converts an input image from YUV color space to RGB.
  1543  The conventional ranges for Y, U, and V channel values are 0 to 255.
  1544  
  1545  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1546  
  1547  @note Function textual ID is "org.opencv.imgproc.colorconvert.yuv2rgb"
  1548  
  1549  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1550  
  1551  @sa RGB2Lab, RGB2YUV
  1552  */
  1553  GAPI_EXPORTS GMat YUV2RGB(const GMat& src);
  1554  
  1555  /** @brief Converts an image from NV12 (YUV420p) color space to RGB.
  1556  The function converts an input image from NV12 color space to RGB.
  1557  The conventional ranges for Y, U, and V channel values are 0 to 255.
  1558  
  1559  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1560  
  1561  @note Function textual ID is "org.opencv.imgproc.colorconvert.nv12torgb"
  1562  
  1563  @param src_y input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1564  @param src_uv input image: 8-bit unsigned 2-channel image @ref CV_8UC2.
  1565  
  1566  @sa YUV2RGB, NV12toBGR
  1567  */
  1568  GAPI_EXPORTS GMat NV12toRGB(const GMat& src_y, const GMat& src_uv);
  1569  
  1570  /** @brief Converts an image from NV12 (YUV420p) color space to gray-scaled.
  1571  The function converts an input image from NV12 color space to gray-scaled.
  1572  The conventional ranges for Y, U, and V channel values are 0 to 255.
  1573  
  1574  Output image must be 8-bit unsigned 1-channel image @ref CV_8UC1.
  1575  
  1576  @note Function textual ID is "org.opencv.imgproc.colorconvert.nv12togray"
  1577  
  1578  @param src_y input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1579  @param src_uv input image: 8-bit unsigned 2-channel image @ref CV_8UC2.
  1580  
  1581  @sa YUV2RGB, NV12toBGR
  1582  */
  1583  GAPI_EXPORTS GMat NV12toGray(const GMat& src_y, const GMat& src_uv);
  1584  
  1585  /** @brief Converts an image from NV12 (YUV420p) color space to BGR.
  1586  The function converts an input image from NV12 color space to BGR.
  1587  The conventional ranges for Y, U, and V channel values are 0 to 255.
  1588  
  1589  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1590  
  1591  @note Function textual ID is "org.opencv.imgproc.colorconvert.nv12tobgr"
  1592  
  1593  @param src_y input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1594  @param src_uv input image: 8-bit unsigned 2-channel image @ref CV_8UC2.
  1595  
  1596  @sa YUV2BGR, NV12toRGB
  1597  */
  1598  GAPI_EXPORTS GMat NV12toBGR(const GMat& src_y, const GMat& src_uv);
  1599  
  1600  /** @brief Converts an image from BayerGR color space to RGB.
  1601  The function converts an input image from BayerGR color space to RGB.
  1602  The conventional ranges for G, R, and B channel values are 0 to 255.
  1603  
  1604  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1605  
  1606  @note Function textual ID is "org.opencv.imgproc.colorconvert.bayergr2rgb"
  1607  
  1608  @param src_gr input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1609  
  1610  @sa YUV2BGR, NV12toRGB
  1611  */
  1612  GAPI_EXPORTS GMat BayerGR2RGB(const GMat& src_gr);
  1613  
  1614  /** @brief Converts an image from RGB color space to HSV.
  1615  The function converts an input image from RGB color space to HSV.
  1616  The conventional ranges for R, G, and B channel values are 0 to 255.
  1617  
  1618  Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3.
  1619  
  1620  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2hsv"
  1621  
  1622  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1623  
  1624  @sa YUV2BGR, NV12toRGB
  1625  */
  1626  GAPI_EXPORTS GMat RGB2HSV(const GMat& src);
  1627  
  1628  /** @brief Converts an image from RGB color space to YUV422.
  1629  The function converts an input image from RGB color space to YUV422.
  1630  The conventional ranges for R, G, and B channel values are 0 to 255.
  1631  
  1632  Output image must be 8-bit unsigned 2-channel image @ref CV_8UC2.
  1633  
  1634  @note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2yuv422"
  1635  
  1636  @param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3.
  1637  
  1638  @sa YUV2BGR, NV12toRGB
  1639  */
  1640  GAPI_EXPORTS GMat RGB2YUV422(const GMat& src);
  1641  
  1642  /** @brief Converts an image from NV12 (YUV420p) color space to RGB.
  1643  The function converts an input image from NV12 color space to RGB.
  1644  The conventional ranges for Y, U, and V channel values are 0 to 255.
  1645  
  1646  Output image must be 8-bit unsigned planar 3-channel image @ref CV_8UC1.
  1647  Planar image memory layout is three planes laying in the memory contiguously,
  1648  so the image height should be plane_height*plane_number,
  1649  image type is @ref CV_8UC1.
  1650  
  1651  @note Function textual ID is "org.opencv.imgproc.colorconvert.nv12torgbp"
  1652  
  1653  @param src_y input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1654  @param src_uv input image: 8-bit unsigned 2-channel image @ref CV_8UC2.
  1655  
  1656  @sa YUV2RGB, NV12toBGRp, NV12toRGB
  1657  */
  1658  GAPI_EXPORTS GMatP NV12toRGBp(const GMat &src_y, const GMat &src_uv);
  1659  
  1660  /** @brief Converts an image from NV12 (YUV420p) color space to BGR.
  1661  The function converts an input image from NV12 color space to BGR.
  1662  The conventional ranges for Y, U, and V channel values are 0 to 255.
  1663  
  1664  Output image must be 8-bit unsigned planar 3-channel image @ref CV_8UC1.
  1665  Planar image memory layout is three planes laying in the memory contiguously,
  1666  so the image height should be plane_height*plane_number,
  1667  image type is @ref CV_8UC1.
  1668  
  1669  @note Function textual ID is "org.opencv.imgproc.colorconvert.nv12tobgrp"
  1670  
  1671  @param src_y input image: 8-bit unsigned 1-channel image @ref CV_8UC1.
  1672  @param src_uv input image: 8-bit unsigned 2-channel image @ref CV_8UC2.
  1673  
  1674  @sa YUV2RGB, NV12toRGBp, NV12toBGR
  1675  */
  1676  GAPI_EXPORTS GMatP NV12toBGRp(const GMat &src_y, const GMat &src_uv);
  1677  
  1678  //! @} gapi_colorconvert
  1679  } //namespace gapi
  1680  } //namespace cv
  1681  
  1682  #endif // OPENCV_GAPI_IMGPROC_HPP